File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 0000000..2573612
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,47 @@
+
+menuconfig TARGET_CORE
+	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
+	depends on SCSI && BLOCK
+	select CONFIGFS_FS
+	select CRC_T10DIF
+	default n
+	help
+	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
+	control path for target_core_mod.  This includes built-in TCM RAMDISK
+	subsystem logic for virtual LUN 0 access.
+
+if TARGET_CORE
+
+config TCM_IBLOCK
+	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	select BLK_DEV_INTEGRITY
+	help
+	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
+	access to Linux/Block devices using BIO.
+
+config TCM_FILEIO
+	tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
+	help
+	Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
+	access to Linux/VFS struct file or struct block_device.
+
+config TCM_PSCSI
+	tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+	help
+	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
+	passthrough access to Linux/SCSI devices.
+
+config TCM_USER2
+	tristate "TCM/USER Subsystem Plugin for Linux"
+	depends on UIO && NET
+	help
+	Say Y here to enable the TCM/USER subsystem plugin for a userspace
+	process to handle requests. This is version 2 of the ABI; version 1
+	is obsolete.
+
+source "drivers/target/loopback/Kconfig"
+source "drivers/target/tcm_fc/Kconfig"
+source "drivers/target/iscsi/Kconfig"
+source "drivers/target/sbp/Kconfig"
+
+endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 0000000..e619c02
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,31 @@
+
+target_core_mod-y		:= target_core_configfs.o \
+				   target_core_device.o \
+				   target_core_fabric_configfs.o \
+				   target_core_fabric_lib.o \
+				   target_core_hba.o \
+				   target_core_pr.o \
+				   target_core_alua.o \
+				   target_core_tmr.o \
+				   target_core_tpg.o \
+				   target_core_transport.o \
+				   target_core_sbc.o \
+				   target_core_spc.o \
+				   target_core_ua.o \
+				   target_core_rd.o \
+				   target_core_stat.o \
+				   target_core_xcopy.o
+
+obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
+
+# Subsystem modules
+obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
+obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
+obj-$(CONFIG_TCM_PSCSI)		+= target_core_pscsi.o
+obj-$(CONFIG_TCM_USER2)		+= target_core_user.o
+
+# Fabric modules
+obj-$(CONFIG_LOOPBACK_TARGET)	+= loopback/
+obj-$(CONFIG_TCM_FC)		+= tcm_fc/
+obj-$(CONFIG_ISCSI_TARGET)	+= iscsi/
+obj-$(CONFIG_SBP_TARGET)	+= sbp/
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
new file mode 100644
index 0000000..8345fb4
--- /dev/null
+++ b/drivers/target/iscsi/Kconfig
@@ -0,0 +1,9 @@
+config ISCSI_TARGET
+	tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
+	depends on NET
+	select CRYPTO
+	select CRYPTO_CRC32C
+	select CRYPTO_CRC32C_INTEL if X86
+	help
+	Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
+	Target Mode Stack.
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
new file mode 100644
index 0000000..0f43be9
--- /dev/null
+++ b/drivers/target/iscsi/Makefile
@@ -0,0 +1,20 @@
+iscsi_target_mod-y +=		iscsi_target_parameters.o \
+				iscsi_target_seq_pdu_list.o \
+				iscsi_target_auth.o \
+				iscsi_target_datain_values.o \
+				iscsi_target_device.o \
+				iscsi_target_erl0.o \
+				iscsi_target_erl1.o \
+				iscsi_target_erl2.o \
+				iscsi_target_login.o \
+				iscsi_target_nego.o \
+				iscsi_target_nodeattrib.o \
+				iscsi_target_tmr.o \
+				iscsi_target_tpg.o \
+				iscsi_target_util.o \
+				iscsi_target.o \
+				iscsi_target_configfs.o \
+				iscsi_target_stat.o \
+				iscsi_target_transport.o
+
+obj-$(CONFIG_ISCSI_TARGET)	+= iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
new file mode 100644
index 0000000..fd49341
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -0,0 +1,4880 @@
+/*******************************************************************************
+ * This file contains main functions related to the iSCSI Target Core Driver.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/idr.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_device.h"
+#include <target/iscsi/iscsi_target_stat.h>
+
+#include <target/iscsi/iscsi_transport.h>
+
+static LIST_HEAD(g_tiqn_list);
+static LIST_HEAD(g_np_list);
+static DEFINE_SPINLOCK(tiqn_lock);
+static DEFINE_MUTEX(np_lock);
+
+static struct idr tiqn_idr;
+struct idr sess_idr;
+struct mutex auth_id_lock;
+spinlock_t sess_idr_lock;
+
+struct iscsit_global *iscsit_global;
+
+struct kmem_cache *lio_qr_cache;
+struct kmem_cache *lio_dr_cache;
+struct kmem_cache *lio_ooo_cache;
+struct kmem_cache *lio_r2t_cache;
+
+static int iscsit_handle_immediate_data(struct iscsi_cmd *,
+			struct iscsi_scsi_req *, u32);
+
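+/*
+ * Look up an active TIQN by name on behalf of a login request.  On a
+ * match, tiqn_access_count is bumped under tiqn_state_lock so the TIQN
+ * cannot be freed until iscsit_put_tiqn_for_login() drops the reference.
+ */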
+struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
+{
+	struct iscsi_tiqn *tiqn = NULL;
+
+	spin_lock(&tiqn_lock);
+	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+		if (!strcmp(tiqn->tiqn, buf)) {
+			spin_lock(&tiqn->tiqn_state_lock);
+			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+				tiqn->tiqn_access_count++;
+				spin_unlock(&tiqn->tiqn_state_lock);
+				spin_unlock(&tiqn_lock);
+				return tiqn;
+			}
+			spin_unlock(&tiqn->tiqn_state_lock);
+		}
+	}
+	spin_unlock(&tiqn_lock);
+
+	return NULL;
+}
+
+static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
+{
+	spin_lock(&tiqn->tiqn_state_lock);
+	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
+		spin_unlock(&tiqn->tiqn_state_lock);
+		return 0;
+	}
+	spin_unlock(&tiqn->tiqn_state_lock);
+
+	return -1;
+}
+
+void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
+{
+	spin_lock(&tiqn->tiqn_state_lock);
+	tiqn->tiqn_access_count--;
+	spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+/*
+ * Note that IQN formatting is expected to be done in userspace, and
+ * no explicit IQN format checks are done here.
+ */
+struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
+{
+	struct iscsi_tiqn *tiqn = NULL;
+	int ret;
+
+	if (strlen(buf) >= ISCSI_IQN_LEN) {
+		pr_err("Target IQN exceeds %d bytes\n",
+				ISCSI_IQN_LEN);
+		return ERR_PTR(-EINVAL);
+	}
+
+	tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
+	if (!tiqn) {
+		pr_err("Unable to allocate struct iscsi_tiqn\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sprintf(tiqn->tiqn, "%s", buf);
+	INIT_LIST_HEAD(&tiqn->tiqn_list);
+	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
+	spin_lock_init(&tiqn->tiqn_state_lock);
+	spin_lock_init(&tiqn->tiqn_tpg_lock);
+	spin_lock_init(&tiqn->sess_err_stats.lock);
+	spin_lock_init(&tiqn->login_stats.lock);
+	spin_lock_init(&tiqn->logout_stats.lock);
+
+	tiqn->tiqn_state = TIQN_STATE_ACTIVE;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&tiqn_lock);
+
+	ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
+	if (ret < 0) {
+		pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
+		spin_unlock(&tiqn_lock);
+		idr_preload_end();
+		kfree(tiqn);
+		return ERR_PTR(ret);
+	}
+	tiqn->tiqn_index = ret;
+	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+
+	spin_unlock(&tiqn_lock);
+	idr_preload_end();
+
+	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
+
+	return tiqn;
+}
+
+static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
+{
+	/*
+	 * Wait for accesses to said struct iscsi_tiqn to end.
+	 */
+	spin_lock(&tiqn->tiqn_state_lock);
+	while (tiqn->tiqn_access_count != 0) {
+		spin_unlock(&tiqn->tiqn_state_lock);
+		msleep(10);
+		spin_lock(&tiqn->tiqn_state_lock);
+	}
+	spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
+{
+	/*
+	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
+	 * while holding tiqn->tiqn_state_lock.  This means that all subsequent
+	 * attempts to access this struct iscsi_tiqn will fail from both transport
+	 * fabric and control code paths.
+	 */
+	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
+		pr_err("iscsit_set_tiqn_shutdown() failed\n");
+		return;
+	}
+
+	iscsit_wait_for_tiqn(tiqn);
+
+	spin_lock(&tiqn_lock);
+	list_del(&tiqn->tiqn_list);
+	idr_remove(&tiqn_idr, tiqn->tiqn_index);
+	spin_unlock(&tiqn_lock);
+
+	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
+			tiqn->tiqn);
+	kfree(tiqn);
+}
+
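+/*
+ * Gate a login attempt on the state of both the network portal and the
+ * portal group, then serialize logins across the TIQN+TPG tuple via
+ * tpg->np_login_sem.  The TPG state is re-checked after the semaphore
+ * is taken, since it may have changed while we slept.
+ */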
+int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+	int ret;
+	/*
+	 * Determine if the network portal is accepting storage traffic.
+	 */
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return -1;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+	/*
+	 * Determine if the portal group is accepting storage traffic.
+	 */
+	spin_lock_bh(&tpg->tpg_state_lock);
+	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+		spin_unlock_bh(&tpg->tpg_state_lock);
+		return -1;
+	}
+	spin_unlock_bh(&tpg->tpg_state_lock);
+
+	/*
+	 * Here we serialize access across the TIQN+TPG Tuple.
+	 */
+	ret = down_interruptible(&tpg->np_login_sem);
+	if (ret != 0)
+		return -1;
+
+	spin_lock_bh(&tpg->tpg_state_lock);
+	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+		spin_unlock_bh(&tpg->tpg_state_lock);
+		up(&tpg->np_login_sem);
+		return -1;
+	}
+	spin_unlock_bh(&tpg->tpg_state_lock);
+
+	return 0;
+}
+
+void iscsit_login_kref_put(struct kref *kref)
+{
+	struct iscsi_tpg_np *tpg_np = container_of(kref,
+				struct iscsi_tpg_np, tpg_np_kref);
+
+	complete(&tpg_np->tpg_np_comp);
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
+		       struct iscsi_tpg_np *tpg_np)
+{
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+	up(&tpg->np_login_sem);
+
+	if (tpg_np)
+		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+
+	if (tiqn)
+		iscsit_put_tiqn_for_login(tiqn);
+
+	return 0;
+}
+
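+/*
+ * A portal matches only if address family, IP address, port and
+ * network transport type all agree with the existing struct iscsi_np.
+ */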
+bool iscsit_check_np_match(
+	struct sockaddr_storage *sockaddr,
+	struct iscsi_np *np,
+	int network_transport)
+{
+	struct sockaddr_in *sock_in, *sock_in_e;
+	struct sockaddr_in6 *sock_in6, *sock_in6_e;
+	bool ip_match = false;
+	u16 port, port_e;
+
+	if (sockaddr->ss_family == AF_INET6) {
+		sock_in6 = (struct sockaddr_in6 *)sockaddr;
+		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+		if (!memcmp(&sock_in6->sin6_addr.in6_u,
+			    &sock_in6_e->sin6_addr.in6_u,
+			    sizeof(struct in6_addr)))
+			ip_match = true;
+
+		port = ntohs(sock_in6->sin6_port);
+		port_e = ntohs(sock_in6_e->sin6_port);
+	} else {
+		sock_in = (struct sockaddr_in *)sockaddr;
+		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
+			ip_match = true;
+
+		port = ntohs(sock_in->sin_port);
+		port_e = ntohs(sock_in_e->sin_port);
+	}
+
+	if (ip_match && (port_e == port) &&
+	    (np->np_network_transport == network_transport))
+		return true;
+
+	return false;
+}
+
+/*
+ * Called with mutex np_lock held
+ */
+static struct iscsi_np *iscsit_get_np(
+	struct sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct iscsi_np *np;
+	bool match;
+
+	list_for_each_entry(np, &g_np_list, np_list) {
+		spin_lock_bh(&np->np_thread_lock);
+		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+			spin_unlock_bh(&np->np_thread_lock);
+			continue;
+		}
+
+		match = iscsit_check_np_match(sockaddr, np, network_transport);
+		if (match) {
+			/*
+			 * Increment the np_exports reference count now to
+			 * prevent iscsit_del_np() below from being called
+			 * while iscsi_tpg_add_network_portal() is called.
+			 */
+			np->np_exports++;
+			spin_unlock_bh(&np->np_thread_lock);
+			return np;
+		}
+		spin_unlock_bh(&np->np_thread_lock);
+	}
+
+	return NULL;
+}
+
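+/*
+ * Find-or-create entry point for network portals: reuse an existing
+ * active struct iscsi_np when address and transport match, otherwise
+ * allocate one, set up the login socket, and spawn the "iscsi_np"
+ * login thread before publishing the portal on g_np_list.
+ */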
+struct iscsi_np *iscsit_add_np(
+	struct sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct iscsi_np *np;
+	int ret;
+
+	mutex_lock(&np_lock);
+
+	/*
+	 * Locate the existing struct iscsi_np, if one is already active.
+	 */
+	np = iscsit_get_np(sockaddr, network_transport);
+	if (np) {
+		mutex_unlock(&np_lock);
+		return np;
+	}
+
+	np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
+	if (!np) {
+		pr_err("Unable to allocate memory for struct iscsi_np\n");
+		mutex_unlock(&np_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	np->np_flags |= NPF_IP_NETWORK;
+	np->np_network_transport = network_transport;
+	spin_lock_init(&np->np_thread_lock);
+	init_completion(&np->np_restart_comp);
+	INIT_LIST_HEAD(&np->np_list);
+
+	ret = iscsi_target_setup_login_socket(np, sockaddr);
+	if (ret != 0) {
+		kfree(np);
+		mutex_unlock(&np_lock);
+		return ERR_PTR(ret);
+	}
+
+	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
+	if (IS_ERR(np->np_thread)) {
+		pr_err("Unable to create kthread: iscsi_np\n");
+		ret = PTR_ERR(np->np_thread);
+		kfree(np);
+		mutex_unlock(&np_lock);
+		return ERR_PTR(ret);
+	}
+	/*
+	 * Increment the np_exports reference count now to prevent
+	 * iscsit_del_np() below from being run while a new call to
+	 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
+	 * active.  We don't need to hold np->np_thread_lock at this
+	 * point because iscsi_np has not been added to g_np_list yet.
+	 */
+	np->np_exports = 1;
+	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+
+	list_add_tail(&np->np_list, &g_np_list);
+	mutex_unlock(&np_lock);
+
+	pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
+		&np->np_sockaddr, np->np_transport->name);
+
+	return np;
+}
+
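+/*
+ * Force the np login thread to restart: mark its state
+ * ISCSI_NP_THREAD_RESET, kick it out of a blocking accept with SIGINT,
+ * and wait for it to signal np_restart_comp.  On shutdown, also drop
+ * the tpg_np reference and wait for the last login to complete.
+ */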
+int iscsit_reset_np_thread(
+	struct iscsi_np *np,
+	struct iscsi_tpg_np *tpg_np,
+	struct iscsi_portal_group *tpg,
+	bool shutdown)
+{
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return 0;
+	}
+	np->np_thread_state = ISCSI_NP_THREAD_RESET;
+	atomic_inc(&np->np_reset_count);
+
+	if (np->np_thread) {
+		spin_unlock_bh(&np->np_thread_lock);
+		send_sig(SIGINT, np->np_thread, 1);
+		wait_for_completion(&np->np_restart_comp);
+		spin_lock_bh(&np->np_thread_lock);
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	if (tpg_np && shutdown) {
+		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+
+		wait_for_completion(&tpg_np->tpg_np_comp);
+	}
+
+	return 0;
+}
+
+static void iscsit_free_np(struct iscsi_np *np)
+{
+	if (np->np_socket)
+		sock_release(np->np_socket);
+}
+
+int iscsit_del_np(struct iscsi_np *np)
+{
+	spin_lock_bh(&np->np_thread_lock);
+	np->np_exports--;
+	if (np->np_exports) {
+		np->enabled = true;
+		spin_unlock_bh(&np->np_thread_lock);
+		return 0;
+	}
+	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
+	spin_unlock_bh(&np->np_thread_lock);
+
+	if (np->np_thread) {
+		/*
+		 * We need to send the signal to wake up Linux/Net,
+		 * which may be sleeping in sock_accept().
+		 */
+		send_sig(SIGINT, np->np_thread, 1);
+		kthread_stop(np->np_thread);
+		np->np_thread = NULL;
+	}
+
+	np->np_transport->iscsit_free_np(np);
+
+	mutex_lock(&np_lock);
+	list_del(&np->np_list);
+	mutex_unlock(&np_lock);
+
+	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
+		&np->np_sockaddr, np->np_transport->name);
+
+	iscsit_put_transport(np->np_transport);
+	kfree(np);
+	return 0;
+}
+
+static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+
+static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+	return 0;
+}
+
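+/*
+ * Fabric callback for aborted commands: unhook the command from the
+ * connection list (unless a fabric stop is in flight) and release it.
+ */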
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+	spin_lock_bh(&conn->cmd_lock);
+	if (!list_empty(&cmd->i_conn_node) &&
+	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
+		list_del_init(&cmd->i_conn_node);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	__iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	return TARGET_PROT_NORMAL;
+}
+
+static struct iscsit_transport iscsi_target_transport = {
+	.name			= "iSCSI/TCP",
+	.transport_type		= ISCSI_TCP,
+	.owner			= NULL,
+	.iscsit_setup_np	= iscsit_setup_np,
+	.iscsit_accept_np	= iscsit_accept_np,
+	.iscsit_free_np		= iscsit_free_np,
+	.iscsit_get_login_rx	= iscsit_get_login_rx,
+	.iscsit_put_login_tx	= iscsit_put_login_tx,
+	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
+	.iscsit_immediate_queue	= iscsit_immediate_queue,
+	.iscsit_response_queue	= iscsit_response_queue,
+	.iscsit_queue_data_in	= iscsit_queue_rsp,
+	.iscsit_queue_status	= iscsit_queue_rsp,
+	.iscsit_aborted_task	= iscsit_aborted_task,
+	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
+};
+
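+/*
+ * Module init: allocate the global iscsit state, register the fabric
+ * template and built-in iSCSI/TCP transport, create the lookaside
+ * caches, and bring up the discovery TPG.  Failures unwind through the
+ * labels below in reverse order of setup.
+ */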
+static int __init iscsi_target_init_module(void)
+{
+	int ret = 0, size;
+
+	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+
+	iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
+	if (!iscsit_global) {
+		pr_err("Unable to allocate memory for iscsit_global\n");
+		return -ENOMEM;
+	}
+	spin_lock_init(&iscsit_global->ts_bitmap_lock);
+	mutex_init(&auth_id_lock);
+	spin_lock_init(&sess_idr_lock);
+	idr_init(&tiqn_idr);
+	idr_init(&sess_idr);
+
+	ret = target_register_template(&iscsi_ops);
+	if (ret)
+		goto out;
+
+	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
+	iscsit_global->ts_bitmap = vzalloc(size);
+	if (!iscsit_global->ts_bitmap) {
+		pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
+		goto configfs_out;
+	}
+
+	lio_qr_cache = kmem_cache_create("lio_qr_cache",
+			sizeof(struct iscsi_queue_req),
+			__alignof__(struct iscsi_queue_req), 0, NULL);
+	if (!lio_qr_cache) {
+		pr_err("nable to kmem_cache_create() for"
+				" lio_qr_cache\n");
+		goto bitmap_out;
+	}
+
+	lio_dr_cache = kmem_cache_create("lio_dr_cache",
+			sizeof(struct iscsi_datain_req),
+			__alignof__(struct iscsi_datain_req), 0, NULL);
+	if (!lio_dr_cache) {
+		pr_err("Unable to kmem_cache_create() for"
+				" lio_dr_cache\n");
+		goto qr_out;
+	}
+
+	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
+			sizeof(struct iscsi_ooo_cmdsn),
+			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
+	if (!lio_ooo_cache) {
+		pr_err("Unable to kmem_cache_create() for"
+				" lio_ooo_cache\n");
+		goto dr_out;
+	}
+
+	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
+			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
+			0, NULL);
+	if (!lio_r2t_cache) {
+		pr_err("Unable to kmem_cache_create() for"
+				" lio_r2t_cache\n");
+		goto ooo_out;
+	}
+
+	iscsit_register_transport(&iscsi_target_transport);
+
+	if (iscsit_load_discovery_tpg() < 0)
+		goto r2t_out;
+
+	return ret;
+r2t_out:
+	iscsit_unregister_transport(&iscsi_target_transport);
+	kmem_cache_destroy(lio_r2t_cache);
+ooo_out:
+	kmem_cache_destroy(lio_ooo_cache);
+dr_out:
+	kmem_cache_destroy(lio_dr_cache);
+qr_out:
+	kmem_cache_destroy(lio_qr_cache);
+bitmap_out:
+	vfree(iscsit_global->ts_bitmap);
+configfs_out:
+	/* XXX: this probably wants to be its own unwind step. */
+	if (iscsit_global->discovery_tpg)
+		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+	target_unregister_template(&iscsi_ops);
+out:
+	kfree(iscsit_global);
+	return -ENOMEM;
+}
+
+static void __exit iscsi_target_cleanup_module(void)
+{
+	iscsit_release_discovery_tpg();
+	iscsit_unregister_transport(&iscsi_target_transport);
+	kmem_cache_destroy(lio_qr_cache);
+	kmem_cache_destroy(lio_dr_cache);
+	kmem_cache_destroy(lio_ooo_cache);
+	kmem_cache_destroy(lio_r2t_cache);
+
+	/*
+	 * Shutdown discovery sessions and disable discovery TPG
+	 */
+	if (iscsit_global->discovery_tpg)
+		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+
+	target_unregister_template(&iscsi_ops);
+
+	vfree(iscsit_global->ts_bitmap);
+	kfree(iscsit_global);
+}
+
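+/*
+ * Allocate a REJECT response carrying a copy of the offending PDU
+ * header and queue it on the response queue.  Always returns -1 so
+ * callers can propagate the failure directly.
+ */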
+static int iscsit_add_reject(
+	struct iscsi_conn *conn,
+	u8 reason,
+	unsigned char *buf)
+{
+	struct iscsi_cmd *cmd;
+
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+	if (!cmd)
+		return -1;
+
+	cmd->iscsi_opcode = ISCSI_OP_REJECT;
+	cmd->reject_reason = reason;
+
+	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
+	if (!cmd->buf_ptr) {
+		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+		iscsit_free_cmd(cmd, false);
+		return -1;
+	}
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	cmd->i_state = ISTATE_SEND_REJECT;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return -1;
+}
+
+static int iscsit_add_reject_from_cmd(
+	struct iscsi_cmd *cmd,
+	u8 reason,
+	bool add_to_conn,
+	unsigned char *buf)
+{
+	struct iscsi_conn *conn;
+
+	if (!cmd->conn) {
+		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
+				cmd->init_task_tag);
+		return -1;
+	}
+	conn = cmd->conn;
+
+	cmd->iscsi_opcode = ISCSI_OP_REJECT;
+	cmd->reject_reason = reason;
+
+	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
+	if (!cmd->buf_ptr) {
+		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+		iscsit_free_cmd(cmd, false);
+		return -1;
+	}
+
+	if (add_to_conn) {
+		spin_lock_bh(&conn->cmd_lock);
+		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+		spin_unlock_bh(&conn->cmd_lock);
+	}
+
+	cmd->i_state = ISTATE_SEND_REJECT;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	/*
+	 * Perform the kref_put now if se_cmd has already been set up by
+	 * iscsit_setup_scsi_cmd().
+	 */
+	if (cmd->se_cmd.se_tfo != NULL) {
+		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
+		target_put_sess_cmd(&cmd->se_cmd);
+	}
+	return -1;
+}
+
+static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
+				 unsigned char *buf)
+{
+	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
+}
+
+int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
+{
+	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
+}
+
+/*
+ * Map some portion of the allocated scatterlist to an iovec, suitable for
+ * kernel sockets to copy data in/out.
+ */
+static int iscsit_map_iovec(
+	struct iscsi_cmd *cmd,
+	struct kvec *iov,
+	u32 data_offset,
+	u32 data_length)
+{
+	u32 i = 0;
+	struct scatterlist *sg;
+	unsigned int page_off;
+
+	/*
+	 * We know each entry in t_data_sg contains a page.
+	 */
+	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+	page_off = (data_offset % PAGE_SIZE);
+
+	cmd->first_data_sg = sg;
+	cmd->first_data_sg_off = page_off;
+
+	while (data_length) {
+		u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
+		iov[i].iov_len = cur_len;
+
+		data_length -= cur_len;
+		page_off = 0;
+		sg = sg_next(sg);
+		i++;
+	}
+
+	cmd->kmapped_nents = i;
+
+	return i;
+}
+
+static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
+{
+	u32 i;
+	struct scatterlist *sg;
+
+	sg = cmd->first_data_sg;
+
+	for (i = 0; i < cmd->kmapped_nents; i++)
+		kunmap(sg_page(&sg[i]));
+}
+
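+/*
+ * Retire commands acknowledged by the initiator's ExpStatSN: any
+ * command in ISTATE_SENT_STATUS whose StatSN is below exp_statsn is
+ * moved to a local list under conn->cmd_lock and freed outside of it.
+ */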
+static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+{
+	LIST_HEAD(ack_list);
+	struct iscsi_cmd *cmd, *cmd_p;
+
+	conn->exp_statsn = exp_statsn;
+
+	if (conn->sess->sess_ops->RDMAExtensions)
+		return;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
+		spin_lock(&cmd->istate_lock);
+		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
+		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
+			cmd->i_state = ISTATE_REMOVE;
+			spin_unlock(&cmd->istate_lock);
+			list_move_tail(&cmd->i_conn_node, &ack_list);
+			continue;
+		}
+		spin_unlock(&cmd->istate_lock);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
+		list_del_init(&cmd->i_conn_node);
+		iscsit_free_cmd(cmd, false);
+	}
+}
+
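+/*
+ * Size the per-command kvec array for the worst case: one entry per
+ * page of the data buffer, plus ISCSI_IOV_DATA_BUFFER extra slots of
+ * headroom (e.g. for padding and digest entries).
+ */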
+static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
+{
+	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
+
+	iov_count += ISCSI_IOV_DATA_BUFFER;
+
+	cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
+	if (!cmd->iov_data) {
+		pr_err("Unable to allocate cmd->iov_data\n");
+		return -ENOMEM;
+	}
+
+	cmd->orig_iov_data_count = iov_count;
+	return 0;
+}
+
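+/*
+ * Validate an incoming SCSI Command PDU header against the RFC-3720
+ * rules (R/W flags vs. EDTL consistency, immediate data limits,
+ * FirstBurstLength) and initialize the iscsi_cmd/se_cmd descriptors.
+ * Protocol violations are answered with iscsit_add_reject_cmd().
+ */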
+int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			  unsigned char *buf)
+{
+	int data_direction, payload_length;
+	struct iscsi_scsi_req *hdr;
+	int iscsi_task_attr;
+	int sam_task_attr;
+
+	atomic_long_inc(&conn->sess->cmd_pdus);
+
+	hdr			= (struct iscsi_scsi_req *) buf;
+	payload_length		= ntoh24(hdr->dlength);
+
+	/* FIXME: Add checks for AdditionalHeaderSegment */
+
+	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
+	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
+				" not set. Bad iSCSI Initiator.\n");
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
+	}
+
+	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
+	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
+		/*
+		 * From RFC-3720 Section 10.3.1:
+		 *
+		 * "Either or both of R and W MAY be 1 when either the
+		 *  Expected Data Transfer Length and/or Bidirectional Read
+		 *  Expected Data Transfer Length are 0"
+		 *
+		 * For this case, go ahead and clear the unnecessary bits
+		 * to avoid any confusion with ->data_direction.
+		 */
+		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+
+		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+			" set when Expected Data Transfer Length is 0 for"
+			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
+	}
+
+	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
+	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
+		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
+			" MUST be set if Expected Data Transfer Length is not 0."
+			" Bad iSCSI Initiator\n");
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
+	}
+
+	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
+	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
+		pr_err("Bidirectional operations not supported!\n");
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
+	}
+
+	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
+				" Scsi Command PDU.\n");
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
+	}
+
+	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
+		pr_err("ImmediateData=No but DataSegmentLength=%u,"
+			" protocol error.\n", payload_length);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
+	}
+
+	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
+	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
+		pr_err("Expected Data Transfer Length and Length of"
+			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
+			" bit is not set protocol error\n");
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
+	}
+
+	if (payload_length > be32_to_cpu(hdr->data_length)) {
+		pr_err("DataSegmentLength: %u is greater than"
+			" EDTL: %u, protocol error.\n", payload_length,
+				be32_to_cpu(hdr->data_length));
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
+	}
+
+	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
+		pr_err("DataSegmentLength: %u is greater than"
+			" MaxXmitDataSegmentLength: %u, protocol error.\n",
+			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
+	}
+
+	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
+		pr_err("DataSegmentLength: %u is greater than"
+			" FirstBurstLength: %u, protocol error.\n",
+			payload_length, conn->sess->sess_ops->FirstBurstLength);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
+	}
+
+	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
+			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
+			  DMA_NONE;
+
+	cmd->data_direction = data_direction;
+	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
+	/*
+	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
+	 */
+	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
+	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
+		sam_task_attr = TCM_SIMPLE_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
+		sam_task_attr = TCM_ORDERED_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
+		sam_task_attr = TCM_HEAD_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
+		sam_task_attr = TCM_ACA_TAG;
+	else {
+		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
+			" TCM_SIMPLE_TAG\n", iscsi_task_attr);
+		sam_task_attr = TCM_SIMPLE_TAG;
+	}
+
+	cmd->iscsi_opcode	= ISCSI_OP_SCSI_CMD;
+	cmd->i_state		= ISTATE_NEW_CMD;
+	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+	cmd->immediate_data	= (payload_length) ? 1 : 0;
+	cmd->unsolicited_data	= ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
+				     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
+	if (cmd->unsolicited_data)
+		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
+
+	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+	if (hdr->flags & ISCSI_FLAG_CMD_READ)
+		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+	else
+		cmd->targ_xfer_tag = 0xFFFFFFFF;
+	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
+	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
+	cmd->first_burst_len	= payload_length;
+
+	if (!conn->sess->sess_ops->RDMAExtensions &&
+	     cmd->data_direction == DMA_FROM_DEVICE) {
+		struct iscsi_datain_req *dr;
+
+		dr = iscsit_allocate_datain_req();
+		if (!dr)
+			return iscsit_add_reject_cmd(cmd,
+					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+
+		iscsit_attach_datain_req(cmd, dr);
+	}
+
+	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+			conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+			cmd->data_direction, sam_task_attr,
+			cmd->sense_buffer + 2);
+
+	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+		conn->cid);
+
+	target_get_sess_cmd(&cmd->se_cmd, true);
+
+	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+						     scsilun_to_int(&hdr->lun));
+	if (cmd->sense_reason)
+		goto attach_cmd;
+
+	/* only used for printks or comparing with ->ref_task_tag */
+	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
+	cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
+	if (cmd->sense_reason) {
+		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+			return iscsit_add_reject_cmd(cmd,
+					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+		}
+
+		goto attach_cmd;
+	}
+
+	if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
+		return iscsit_add_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+	}
+
+attach_cmd:
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+	/*
+	 * Check if we need to delay processing because of ALUA
+	 * Active/NonOptimized primary access state..
+	 */
+	core_alua_check_nonop_delay(&cmd->se_cmd);
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
+
+void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
+{
+	iscsit_set_dataout_sequence_values(cmd);
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	iscsit_start_dataout_timer(cmd, cmd->conn);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
+
+int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			    struct iscsi_scsi_req *hdr)
+{
+	int cmdsn_ret = 0;
+	/*
+	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
+	 * the Immediate Bit is not set, and no Immediate
+	 * Data is attached.
+	 *
+	 * A PDU/CmdSN carrying Immediate Data can only
+	 * be processed after the DataCRC has passed.
+	 * If the DataCRC fails, the CmdSN MUST NOT
+	 * be acknowledged. (See below)
+	 */
+	if (!cmd->immediate_data) {
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+					(unsigned char *)hdr, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
+		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			target_put_sess_cmd(&cmd->se_cmd);
+			return 0;
+		}
+	}
+
+	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
+
+	/*
+	 * If no Immediate Data is attached, it's OK to return now.
+	 */
+	if (!cmd->immediate_data) {
+		if (!cmd->sense_reason && cmd->unsolicited_data)
+			iscsit_set_unsoliticed_dataout(cmd);
+		if (!cmd->sense_reason)
+			return 0;
+
+		target_put_sess_cmd(&cmd->se_cmd);
+		return 0;
+	}
+
+	/*
+	 * Early CHECK_CONDITIONs with ImmediateData never make it to command
+	 * execution.  These exceptions are processed in CmdSN order using
+	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
+	 */
+	if (cmd->sense_reason) {
+		if (cmd->reject_reason)
+			return 0;
+
+		return 1;
+	}
+	/*
+	 * Call directly into transport_generic_new_cmd() to perform
+	 * the backend memory allocation.
+	 */
+	cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
+	if (cmd->sense_reason)
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_process_scsi_cmd);
+
+static int
+iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+			  bool dump_payload)
+{
+	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+	/*
+	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+	 */
+	if (dump_payload)
+		goto after_immediate_data;
+	/*
+	 * Check for underflow case where both EDTL and immediate data payload
+	 * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
+	 * already been set in target_cmd_size_check() as se_cmd->data_length.
+	 *
+	 * For this special case, fail the command and dump the immediate data
+	 * payload.
+	 */
+	if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+		cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+		goto after_immediate_data;
+	}
+
+	immed_ret = iscsit_handle_immediate_data(cmd, hdr,
+					cmd->first_burst_len);
+after_immediate_data:
+	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+		/*
+		 * A PDU/CmdSN carrying Immediate Data passed
+		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
+		 * Immediate Bit is not set.
+		 */
+		cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
+					(unsigned char *)hdr, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
+
+		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			int rc;
+
+			rc = iscsit_dump_data_payload(cmd->conn,
+						      cmd->first_burst_len, 1);
+			target_put_sess_cmd(&cmd->se_cmd);
+			return rc;
+		} else if (cmd->unsolicited_data)
+			iscsit_set_unsoliticed_dataout(cmd);
+
+	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+		/*
+		 * Immediate Data failed DataCRC and ERL>=1,
+		 * silently drop this PDU and let the initiator
+		 * plug the CmdSN gap.
+		 *
+		 * FIXME: Send Unsolicited NOPIN with reserved
+		 * TTT here to help the initiator figure out
+		 * the missing CmdSN, although they should be
+		 * intelligent enough to determine the missing
+		 * CmdSN and issue a retry to plug the sequence.
+		 */
+		cmd->i_state = ISTATE_REMOVE;
+		iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
+	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+		return -1;
+
+	return 0;
+}
+
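+/*
+ * Full RX-side handling of a SCSI Command PDU for traditional
+ * iSCSI/TCP: header setup, iovec allocation, CmdSN processing, and
+ * reception of any immediate data payload.
+ */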
+static int
+iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			   unsigned char *buf)
+{
+	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
+	int rc, immed_data;
+	bool dump_payload = false;
+
+	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
+	if (rc < 0)
+		return 0;
+	/*
+	 * Allocate the iovecs needed for struct socket operations for
+	 * traditional iSCSI block I/O.
+	 */
+	if (iscsit_allocate_iovecs(cmd) < 0) {
+		return iscsit_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+	}
+	immed_data = cmd->immediate_data;
+
+	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+	if (rc < 0)
+		return rc;
+	else if (rc > 0)
+		dump_payload = true;
+
+	if (!immed_data)
+		return 0;
+
+	return iscsit_get_immediate_data(cmd, hdr, dump_payload);
+}
+
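+/*
+ * Compute the DataDigest (CRC32C) over the command's scatterlist using
+ * the legacy crypto_hash API, folding in pad bytes when the payload is
+ * not a multiple of four.
+ */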
+static u32 iscsit_do_crypto_hash_sg(
+	struct hash_desc *hash,
+	struct iscsi_cmd *cmd,
+	u32 data_offset,
+	u32 data_length,
+	u32 padding,
+	u8 *pad_bytes)
+{
+	u32 data_crc;
+	struct scatterlist *sg;
+	unsigned int page_off;
+
+	crypto_hash_init(hash);
+
+	sg = cmd->first_data_sg;
+	page_off = cmd->first_data_sg_off;
+
+	while (data_length) {
+		u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
+
+		crypto_hash_update(hash, sg, cur_len);
+
+		data_length -= cur_len;
+		page_off = 0;
+		/* iscsit_map_iovec has already checked for invalid sg pointers */
+		sg = sg_next(sg);
+	}
+
+	if (padding) {
+		struct scatterlist pad_sg;
+
+		sg_init_one(&pad_sg, pad_bytes, padding);
+		crypto_hash_update(hash, &pad_sg, padding);
+	}
+	crypto_hash_final(hash, (u8 *) &data_crc);
+
+	return data_crc;
+}
+
+static void iscsit_do_crypto_hash_buf(
+	struct hash_desc *hash,
+	const void *buf,
+	u32 payload_length,
+	u32 padding,
+	u8 *pad_bytes,
+	u8 *data_crc)
+{
+	struct scatterlist sg;
+
+	crypto_hash_init(hash);
+
+	sg_init_one(&sg, buf, payload_length);
+	crypto_hash_update(hash, &sg, payload_length);
+
+	if (padding) {
+		sg_init_one(&sg, pad_bytes, padding);
+		crypto_hash_update(hash, &sg, padding);
+	}
+	crypto_hash_final(hash, data_crc);
+}
+
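+/*
+ * Validate a DataOUT header: locate the referenced command by ITT,
+ * check direction, offset and length against the EDTL, handle the
+ * unsolicited-data and aborted-command special cases, and run the
+ * pre-payload DataSN/sequence checks.  *out_cmd is only set once the
+ * payload should actually be received.
+ */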
+int
+iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
+			  struct iscsi_cmd **out_cmd)
+{
+	struct iscsi_data *hdr = (struct iscsi_data *)buf;
+	struct iscsi_cmd *cmd = NULL;
+	struct se_cmd *se_cmd;
+	u32 payload_length = ntoh24(hdr->dlength);
+	int rc;
+
+	if (!payload_length) {
+		pr_warn("DataOUT payload is ZERO, ignoring.\n");
+		return 0;
+	}
+
+	/* iSCSI write */
+	atomic_long_add(payload_length, &conn->sess->rx_data_octets);
+
+	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
+		pr_err("DataSegmentLength: %u is greater than"
+			" MaxXmitDataSegmentLength: %u\n", payload_length,
+			conn->conn_ops->MaxXmitDataSegmentLength);
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
+	}
+
+	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
+			payload_length);
+	if (!cmd)
+		return 0;
+
+	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
+		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+		hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
+		payload_length, conn->cid);
+
+	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+		pr_err("Command ITT: 0x%08x received DataOUT after"
+			" last DataOUT received, dumping payload\n",
+			cmd->init_task_tag);
+		return iscsit_dump_data_payload(conn, payload_length, 1);
+	}
+
+	if (cmd->data_direction != DMA_TO_DEVICE) {
+		pr_err("Command ITT: 0x%08x received DataOUT for a"
+			" NON-WRITE command.\n", cmd->init_task_tag);
+		return iscsit_dump_data_payload(conn, payload_length, 1);
+	}
+	se_cmd = &cmd->se_cmd;
+	iscsit_mod_dataout_timer(cmd);
+
+	if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
+		pr_err("DataOut Offset: %u, Length %u greater than"
+			" iSCSI Command EDTL %u, protocol error.\n",
+			hdr->offset, payload_length, cmd->se_cmd.data_length);
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
+	}
+
+	if (cmd->unsolicited_data) {
+		int dump_unsolicited_data = 0;
+
+		if (conn->sess->sess_ops->InitialR2T) {
+			pr_err("Received unexpected unsolicited data"
+				" while InitialR2T=Yes, protocol error.\n");
+			transport_send_check_condition_and_sense(&cmd->se_cmd,
+					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+			return -1;
+		}
+		/*
+		 * Special case for dealing with Unsolicited DataOUT
+		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
+		 * failures;
+		 */
+
+		/* Something's amiss if we're not in WRITE_PENDING state... */
+		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
+		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
+			dump_unsolicited_data = 1;
+
+		if (dump_unsolicited_data) {
+			/*
+			 * Check if a delayed TASK_ABORTED status needs to
+			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
+			 * received with the unsolicited data out.
+			 */
+			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+				iscsit_stop_dataout_timer(cmd);
+
+			transport_check_aborted_status(se_cmd,
+					(hdr->flags & ISCSI_FLAG_CMD_FINAL));
+			return iscsit_dump_data_payload(conn, payload_length, 1);
+		}
+	} else {
+		/*
+		 * For the normal solicited data path:
+		 *
+		 * Check for a delayed TASK_ABORTED status and dump any
+		 * incoming data out payload if one exists.  Also, when the
+		 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
+		 * data out sequence, we decrement outstanding_r2ts.  Once
+		 * outstanding_r2ts reaches zero, go ahead and send the delayed
+		 * TASK_ABORTED status.
+		 */
+		if (se_cmd->transport_state & CMD_T_ABORTED) {
+			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+				if (--cmd->outstanding_r2ts < 1) {
+					iscsit_stop_dataout_timer(cmd);
+					transport_check_aborted_status(
+							se_cmd, 1);
+				}
+
+			return iscsit_dump_data_payload(conn, payload_length, 1);
+		}
+	}
+	/*
+	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
+	 * within-command recovery checks before receiving the payload.
+	 */
+	rc = iscsit_check_pre_dataout(cmd, buf);
+	if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
+		return 0;
+	else if (rc == DATAOUT_CANNOT_RECOVER)
+		return -1;
+
+	*out_cmd = cmd;
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_check_dataout_hdr);
+
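+/*
+ * Receive a DataOUT payload into the command's scatterlist through the
+ * mapped iovecs, including pad bytes and an optional DataDigest.
+ * Returns 1 on digest mismatch, 0 on success, and -1 on socket failure.
+ */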
+static int
+iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		   struct iscsi_data *hdr)
+{
+	struct kvec *iov;
+	u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
+	u32 payload_length = ntoh24(hdr->dlength);
+	int iov_ret, data_crc_failed = 0;
+
+	rx_size += payload_length;
+	iov = &cmd->iov_data[0];
+
+	iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
+				   payload_length);
+	if (iov_ret < 0)
+		return -1;
+
+	iov_count += iov_ret;
+
+	padding = ((-payload_length) & 3);
+	if (padding != 0) {
+		iov[iov_count].iov_base	= cmd->pad_bytes;
+		iov[iov_count++].iov_len = padding;
+		rx_size += padding;
+		pr_debug("Receiving %u padding bytes.\n", padding);
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		iov[iov_count].iov_base = &checksum;
+		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+		rx_size += ISCSI_CRC_LEN;
+	}
+
+	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+	iscsit_unmap_iovec(cmd);
+
+	if (rx_got != rx_size)
+		return -1;
+
+	if (conn->conn_ops->DataDigest) {
+		u32 data_crc;
+
+		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+						    be32_to_cpu(hdr->offset),
+						    payload_length, padding,
+						    cmd->pad_bytes);
+
+		if (checksum != data_crc) {
+			pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+				" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
+				" does not match computed 0x%08x\n",
+				hdr->itt, be32_to_cpu(hdr->offset), payload_length,
+				hdr->datasn, checksum, data_crc);
+			data_crc_failed = 1;
+		} else {
+			pr_debug("Got CRC32C DataDigest 0x%08x for"
+				" %u bytes of Data Out\n", checksum,
+				payload_length);
+		}
+	}
+
+	return data_crc_failed;
+}
+
+int
+iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
+			     bool data_crc_failed)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	int rc, ooo_cmdsn;
+	/*
+	 * Increment post receive data and CRC values or perform
+	 * within-command recovery.
+	 */
+	rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
+	if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
+		return 0;
+	else if (rc == DATAOUT_SEND_R2T) {
+		iscsit_set_dataout_sequence_values(cmd);
+		conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
+	} else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
+		/*
+		 * Handle extra special case for out of order
+		 * Unsolicited Data Out.
+		 */
+		spin_lock_bh(&cmd->istate_lock);
+		ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
+		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+		spin_unlock_bh(&cmd->istate_lock);
+
+		iscsit_stop_dataout_timer(cmd);
+		if (ooo_cmdsn)
+			return 0;
+		target_execute_cmd(&cmd->se_cmd);
+		return 0;
+	} else /* DATAOUT_CANNOT_RECOVER */
+		return -1;
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_check_dataout_payload);
+
+static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+{
+	struct iscsi_cmd *cmd = NULL;
+	struct iscsi_data *hdr = (struct iscsi_data *)buf;
+	int rc;
+	bool data_crc_failed = false;
+
+	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
+	if (rc < 0)
+		return 0;
+	else if (!cmd)
+		return 0;
+
+	rc = iscsit_get_dataout(conn, cmd, hdr);
+	if (rc < 0)
+		return rc;
+	else if (rc > 0)
+		data_crc_failed = true;
+
+	return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
+}
+
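+/*
+ * Sanity-check a NOPOUT header (Final bit, reserved ITT rules, payload
+ * length).  For PDUs that are not responses to an unsolicited NOPIN
+ * (TTT == 0xFFFFFFFF), initialize the command for an ISTATE_SEND_NOPIN
+ * response.
+ */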
+int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			 struct iscsi_nopout *hdr)
+{
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+		pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
+		
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
+	}
+
+	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
+			" not set, protocol error.\n");
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
+
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
+	}
+
+	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
+		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
+			" greater than MaxXmitDataSegmentLength: %u, protocol"
+			" error.\n", payload_length,
+			conn->conn_ops->MaxXmitDataSegmentLength);
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
+
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
+	}
+
+	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
+		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+		hdr->itt == RESERVED_ITT ? "Response" : "Request",
+		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
+		payload_length);
+	/*
+	 * This is not a response to an unsolicited NOPIN, which means
+	 * it can either be a NOPOUT ping request (with a valid ITT),
+	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
+	 * Either way, make sure we allocate a struct iscsi_cmd, as both
+	 * can contain ping data.
+	 */
+	if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+		cmd->iscsi_opcode	= ISCSI_OP_NOOP_OUT;
+		cmd->i_state		= ISTATE_SEND_NOPIN;
+		cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
+						1 : 0);
+		conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+		cmd->targ_xfer_tag	= 0xFFFFFFFF;
+		cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
+		cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
+		cmd->data_direction	= DMA_NONE;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_setup_nop_out);
+
+int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			   struct iscsi_nopout *hdr)
+{
+	struct iscsi_cmd *cmd_p = NULL;
+	int cmdsn_ret = 0;
+	/*
+	 * The initiator is expecting a NOPIN ping reply.
+	 */
+	if (hdr->itt != RESERVED_ITT) {
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						(unsigned char *)hdr);
+
+		spin_lock_bh(&conn->cmd_lock);
+		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
+
+		if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+			iscsit_add_cmd_to_response_queue(cmd, conn,
+							 cmd->i_state);
+			return 0;
+		}
+
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+				(unsigned char *)hdr, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+			return 0;
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
+
+		return 0;
+	}
+	/*
+	 * This was a response to an unsolicited NOPIN ping.
+	 */
+	if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+		cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
+		if (!cmd_p)
+			return -EINVAL;
+
+		iscsit_stop_nopin_response_timer(conn);
+
+		cmd_p->i_state = ISTATE_REMOVE;
+		iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
+
+		iscsit_start_nopin_timer(conn);
+		return 0;
+	}
+	/*
+	 * Otherwise, the initiator is not expecting a NOPIN in response.
+	 * Just ignore for now.
+	 */
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_process_nop_out);
+
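+/*
+ * Receive and process a complete NOPOUT PDU for traditional iSCSI
+ * sockets, including any ping data payload, padding and DataDigest.
+ */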
+static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+				 unsigned char *buf)
+{
+	unsigned char *ping_data = NULL;
+	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
+	struct kvec *iov = NULL;
+	u32 payload_length = ntoh24(hdr->dlength);
+	int ret;
+
+	ret = iscsit_setup_nop_out(conn, cmd, hdr);
+	if (ret < 0)
+		return 0;
+	/*
+	 * Handle NOP-OUT payload for traditional iSCSI sockets
+	 */
+	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+		u32 checksum, data_crc, padding = 0;
+		int niov = 0, rx_got, rx_size = payload_length;
+
+		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+		if (!ping_data) {
+			pr_err("Unable to allocate memory for"
+				" NOPOUT ping data.\n");
+			ret = -1;
+			goto out;
+		}
+
+		iov = &cmd->iov_misc[0];
+		iov[niov].iov_base	= ping_data;
+		iov[niov++].iov_len	= payload_length;
+
+		padding = ((-payload_length) & 3);
+		if (padding != 0) {
+			pr_debug("Receiving %u additional bytes"
+				" for padding.\n", padding);
+			iov[niov].iov_base	= &cmd->pad_bytes;
+			iov[niov++].iov_len	= padding;
+			rx_size += padding;
+		}
+		if (conn->conn_ops->DataDigest) {
+			iov[niov].iov_base	= &checksum;
+			iov[niov++].iov_len	= ISCSI_CRC_LEN;
+			rx_size += ISCSI_CRC_LEN;
+		}
+
+		rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
+		if (rx_got != rx_size) {
+			ret = -1;
+			goto out;
+		}
+
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+					ping_data, payload_length,
+					padding, cmd->pad_bytes,
+					(u8 *)&data_crc);
+
+			if (checksum != data_crc) {
+				pr_err("Ping data CRC32C DataDigest"
+				" 0x%08x does not match computed 0x%08x\n",
+					checksum, data_crc);
+				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+					pr_err("Unable to recover from"
+					" NOPOUT Ping DataCRC failure while in"
+						" ERL=0.\n");
+					ret = -1;
+					goto out;
+				} else {
+					/*
+					 * Silently drop this PDU and let the
+					 * initiator plug the CmdSN gap.
+					 */
+					pr_debug("Dropping NOPOUT"
+					" Command CmdSN: 0x%08x due to"
+					" DataCRC error.\n", hdr->cmdsn);
+					ret = 0;
+					goto out;
+				}
+			} else {
+				pr_debug("Got CRC32C DataDigest"
+				" 0x%08x for %u bytes of ping data.\n",
+					checksum, payload_length);
+			}
+		}
+
+		ping_data[payload_length] = '\0';
+		/*
+		 * Attach ping data to struct iscsi_cmd->buf_ptr.
+		 */
+		cmd->buf_ptr = ping_data;
+		cmd->buf_ptr_size = payload_length;
+
+		pr_debug("Got %u bytes of NOPOUT ping"
+			" data.\n", payload_length);
+		pr_debug("Ping Data: \"%s\"\n", ping_data);
+	}
+
+	return iscsit_process_nop_out(conn, cmd, hdr);
+out:
+	if (cmd)
+		iscsit_free_cmd(cmd, false);
+
+	kfree(ping_data);
+	return ret;
+}
+
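+/*
+ * Handle a Task Management Function Request: normalize the header
+ * fields, map the iSCSI TMR function to its TCM equivalent, look up
+ * the LUN where required, and either hand the TMR to the transport or
+ * queue an immediate response carrying the failure code.
+ */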
+int
+iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			   unsigned char *buf)
+{
+	struct se_tmr_req *se_tmr;
+	struct iscsi_tmr_req *tmr_req;
+	struct iscsi_tm *hdr;
+	int out_of_order_cmdsn = 0, ret;
+	bool sess_ref = false;
+	u8 function, tcm_function = TMR_UNKNOWN;
+
+	hdr			= (struct iscsi_tm *) buf;
+	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+	function = hdr->flags;
+
+	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
+		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
+		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
+		hdr->rtt, hdr->refcmdsn, conn->cid);
+
+	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+	     hdr->rtt != RESERVED_ITT)) {
+		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
+		hdr->rtt = RESERVED_ITT;
+	}
+
+	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
+			!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+		pr_err("Task Management Request TASK_REASSIGN not"
+			" issued as immediate command, bad iSCSI Initiator"
+				"implementation\n");
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
+	}
+	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+	    be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
+		hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
+
+	cmd->data_direction = DMA_NONE;
+
+	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+	if (!cmd->tmr_req) {
+		pr_err("Unable to allocate memory for"
+			" Task Management command!\n");
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					     buf);
+	}
+
+	/*
+	 * TASK_REASSIGN for ERL=2 / connection stays inside of
+	 * LIO-Target $FABRIC_MOD
+	 */
+	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+		transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+				      conn->sess->se_sess, 0, DMA_NONE,
+				      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+		target_get_sess_cmd(&cmd->se_cmd, true);
+		sess_ref = true;
+
+		switch (function) {
+		case ISCSI_TM_FUNC_ABORT_TASK:
+			tcm_function = TMR_ABORT_TASK;
+			break;
+		case ISCSI_TM_FUNC_ABORT_TASK_SET:
+			tcm_function = TMR_ABORT_TASK_SET;
+			break;
+		case ISCSI_TM_FUNC_CLEAR_ACA:
+			tcm_function = TMR_CLEAR_ACA;
+			break;
+		case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+			tcm_function = TMR_CLEAR_TASK_SET;
+			break;
+		case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+			tcm_function = TMR_LUN_RESET;
+			break;
+		case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+			tcm_function = TMR_TARGET_WARM_RESET;
+			break;
+		case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+			tcm_function = TMR_TARGET_COLD_RESET;
+			break;
+		default:
+			pr_err("Unknown iSCSI TMR Function:"
+			       " 0x%02x\n", function);
+			return iscsit_add_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+		}
+	}
+	ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
+				 GFP_KERNEL);
+	if (ret < 0)
+		return iscsit_add_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+
+	cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
+
+	cmd->iscsi_opcode	= ISCSI_OP_SCSI_TMFUNC;
+	cmd->i_state		= ISTATE_SEND_TASKMGTRSP;
+	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+	cmd->init_task_tag	= hdr->itt;
+	cmd->targ_xfer_tag	= 0xFFFFFFFF;
+	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
+	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
+	se_tmr			= cmd->se_cmd.se_tmr_req;
+	tmr_req			= cmd->tmr_req;
+	/*
+	 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
+	 */
+	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+		ret = transport_lookup_tmr_lun(&cmd->se_cmd,
+					       scsilun_to_int(&hdr->lun));
+		if (ret < 0) {
+			se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
+			goto attach;
+		}
+	}
+
+	switch (function) {
+	case ISCSI_TM_FUNC_ABORT_TASK:
+		se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
+		if (se_tmr->response)
+			goto attach;
+		break;
+	case ISCSI_TM_FUNC_ABORT_TASK_SET:
+	case ISCSI_TM_FUNC_CLEAR_ACA:
+	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+		break;
+	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+		if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
+			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+			goto attach;
+		}
+		break;
+	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+		if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
+			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+			goto attach;
+		}
+		break;
+	case ISCSI_TM_FUNC_TASK_REASSIGN:
+		se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
+		/*
+		 * Perform sanity checks on the ExpDataSN only if the
+		 * TASK_REASSIGN was successful.
+		 */
+		if (se_tmr->response)
+			break;
+
+		if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
+			return iscsit_add_reject_cmd(cmd,
+					ISCSI_REASON_BOOKMARK_INVALID, buf);
+		break;
+	default:
+		pr_err("Unknown TMR function: 0x%02x, protocol"
+			" error.\n", function);
+		se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
+		goto attach;
+	}
+
+	if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+	    (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+		se_tmr->call_transport = 1;
+attach:
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+			out_of_order_cmdsn = 1;
+		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+			return 0;
+		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
+	}
+	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
+
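+	/*
+	 * Non-immediate TMRs are executed in CmdSN order via
+	 * iscsit_sequence_cmd() above, so only an immediate, in-order TMR
+	 * is handed to the transport directly below.
+	 */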
+	if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
+		return 0;
+	/*
+	 * Found the referenced task, send to transport for processing.
+	 */
+	if (se_tmr->call_transport)
+		return transport_generic_handle_tmr(&cmd->se_cmd);
+
+	/*
+	 * The referenced LUN or task could not be found, or the Task
+	 * Management command was not authorized or supported.  Change state and
+	 * let the tx_thread send the response.
+	 *
+	 * For connection recovery, this is also the default action for
+	 * TMR TASK_REASSIGN.
+	 */
+	if (sess_ref) {
+		pr_debug("Handle TMR, using sess_ref=true check\n");
+		target_put_sess_cmd(&cmd->se_cmd);
+	}
+
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
+
+/* #warning FIXME: Support Text Command parameters besides SendTargets */
+int
+iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		      struct iscsi_text *hdr)
+{
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
+		pr_err("Unable to accept text parameter length: %u"
+			"greater than MaxXmitDataSegmentLength %u.\n",
+		       payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
+	}
+
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
+	     (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
+		pr_err("Multi sequence text commands currently not supported\n");
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
+					(unsigned char *)hdr);
+	}
+
+	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
+		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
+		hdr->exp_statsn, payload_length);
+
+	cmd->iscsi_opcode	= ISCSI_OP_TEXT;
+	cmd->i_state		= ISTATE_SEND_TEXTRSP;
+	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
+	cmd->targ_xfer_tag	= 0xFFFFFFFF;
+	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
+	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
+	cmd->data_direction	= DMA_NONE;
+	kfree(cmd->text_in_ptr);
+	cmd->text_in_ptr	= NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_setup_text_cmd);
+
+int
+iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			struct iscsi_text *hdr)
+{
+	unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
+	int cmdsn_ret;
+
+	if (!text_in) {
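+		/*
+		 * An empty Text Request carrying the TTT from an earlier
+		 * partial SendTargets response continues that exchange; a
+		 * reserved TTT with no payload means there is nothing to
+		 * continue, which is a protocol error.
+		 */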
+		cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+		if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
+			pr_err("Unable to locate text_in buffer for sendtargets"
+			       " discovery\n");
+			goto reject;
+		}
+		goto empty_sendtargets;
+	}
+	if (strncmp("SendTargets", text_in, 11) != 0) {
+		pr_err("Received Text Data that is not"
+			" SendTargets, cannot continue.\n");
+		goto reject;
+	}
+	text_ptr = strchr(text_in, '=');
+	if (!text_ptr) {
+		pr_err("No \"=\" separator found in Text Data,"
+			"  cannot continue.\n");
+		goto reject;
+	}
+	if (!strncmp("=All", text_ptr, 4)) {
+		cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
+	} else if (!strncmp("=iqn.", text_ptr, 5) ||
+		   !strncmp("=eui.", text_ptr, 5)) {
+		cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
+	} else {
+		pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
+		goto reject;
+	}
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+empty_sendtargets:
+	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
+
+	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+				(unsigned char *)hdr, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
+
+		return 0;
+	}
+
+	return iscsit_execute_cmd(cmd, 0);
+
+reject:
+	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+				 (unsigned char *)hdr);
+}
+EXPORT_SYMBOL(iscsit_process_text_cmd);
+
+static int
+iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		       unsigned char *buf)
+{
+	struct iscsi_text *hdr = (struct iscsi_text *)buf;
+	char *text_in = NULL;
+	u32 payload_length = ntoh24(hdr->dlength);
+	int rx_size, rc;
+
+	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
+	if (rc < 0)
+		return 0;
+
+	rx_size = payload_length;
+	if (payload_length) {
+		u32 checksum = 0, data_crc = 0;
+		u32 padding = 0, pad_bytes = 0;
+		int niov = 0, rx_got;
+		struct kvec iov[3];
+
+		text_in = kzalloc(payload_length, GFP_KERNEL);
+		if (!text_in) {
+			pr_err("Unable to allocate memory for"
+				" incoming text parameters\n");
+			goto reject;
+		}
+		cmd->text_in_ptr = text_in;
+
+		memset(iov, 0, 3 * sizeof(struct kvec));
+		iov[niov].iov_base	= text_in;
+		iov[niov++].iov_len	= payload_length;
+
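+		/*
+		 * iSCSI data segments are padded to a 4-byte boundary;
+		 * ((-len) & 3) yields the pad size, e.g. a 13-byte segment
+		 * needs 3 pad bytes.
+		 */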
+		padding = ((-payload_length) & 3);
+		if (padding != 0) {
+			iov[niov].iov_base = &pad_bytes;
+			iov[niov++].iov_len  = padding;
+			rx_size += padding;
+			pr_debug("Receiving %u additional bytes"
+					" for padding.\n", padding);
+		}
+		if (conn->conn_ops->DataDigest) {
+			iov[niov].iov_base	= &checksum;
+			iov[niov++].iov_len	= ISCSI_CRC_LEN;
+			rx_size += ISCSI_CRC_LEN;
+		}
+
+		rx_got = rx_data(conn, &iov[0], niov, rx_size);
+		if (rx_got != rx_size)
+			goto reject;
+
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+					text_in, payload_length,
+					padding, (u8 *)&pad_bytes,
+					(u8 *)&data_crc);
+
+			if (checksum != data_crc) {
+				pr_err("Text data CRC32C DataDigest"
+					" 0x%08x does not match computed"
+					" 0x%08x\n", checksum, data_crc);
+				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+					pr_err("Unable to recover from"
+					" Text Data digest failure while in"
+						" ERL=0.\n");
+					goto reject;
+				} else {
+					/*
+					 * Silently drop this PDU and let the
+					 * initiator plug the CmdSN gap.
+					 */
+					pr_debug("Dropping Text"
+					" Command CmdSN: 0x%08x due to"
+					" DataCRC error.\n", hdr->cmdsn);
+					kfree(text_in);
+					return 0;
+				}
+			} else {
+				pr_debug("Got CRC32C DataDigest"
+					" 0x%08x for %u bytes of text data.\n",
+						checksum, payload_length);
+			}
+		}
+		text_in[payload_length - 1] = '\0';
+		pr_debug("Successfully read %d bytes of text"
+				" data.\n", payload_length);
+	}
+
+	return iscsit_process_text_cmd(conn, cmd, hdr);
+
+reject:
+	kfree(cmd->text_in_ptr);
+	cmd->text_in_ptr = NULL;
+	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
+}
+
+int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_conn *conn_p;
+	struct iscsi_session *sess = conn->sess;
+
+	pr_debug("Received logout request CLOSESESSION on CID: %hu"
+		" for SID: %u.\n", conn->cid, conn->sess->sid);
+
+	atomic_set(&sess->session_logout, 1);
+	atomic_set(&conn->conn_logout_remove, 1);
+	conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
+
+	iscsit_inc_conn_usage_count(conn);
+	iscsit_inc_session_usage_count(sess);
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
+		if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
+			continue;
+
+		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+		conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return 0;
+}
+
+int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_conn *l_conn;
+	struct iscsi_session *sess = conn->sess;
+
+	pr_debug("Received logout request CLOSECONNECTION for CID:"
+		" %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+	/*
+	 * A Logout Request with a CLOSECONNECTION reason code for a CID
+	 * can arrive on a connection with a differing CID.
+	 */
+	if (conn->cid == cmd->logout_cid) {
+		spin_lock_bh(&conn->state_lock);
+		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+		conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+
+		atomic_set(&conn->conn_logout_remove, 1);
+		conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
+		iscsit_inc_conn_usage_count(conn);
+
+		spin_unlock_bh(&conn->state_lock);
+	} else {
+		/*
+		 * CLOSECONNECTION requests for a different CID are handled
+		 * in iscsit_logout_post_handler_diffcid(), so as to give
+		 * any non-immediate command's CmdSN enough time to be
+		 * acknowledged on the connection in question.
+		 *
+		 * Here we simply make sure the CID is still around.
+		 */
+		l_conn = iscsit_get_conn_from_cid(sess,
+				cmd->logout_cid);
+		if (!l_conn) {
+			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+			iscsit_add_cmd_to_response_queue(cmd, conn,
+					cmd->i_state);
+			return 0;
+		}
+
+		iscsit_dec_conn_usage_count(l_conn);
+	}
+
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return 0;
+}
+
+int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+
+	pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
+		" CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+	if (sess->sess_ops->ErrorRecoveryLevel != 2) {
+		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+			" while ERL!=2.\n");
+		cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		return 0;
+	}
+
+	if (conn->cid == cmd->logout_cid) {
+		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+			" with CID: %hu on CID: %hu, implementation error.\n",
+				cmd->logout_cid, conn->cid);
+		cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		return 0;
+	}
+
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return 0;
+}
+
+int
+iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			unsigned char *buf)
+{
+	int cmdsn_ret, logout_remove = 0;
+	u8 reason_code = 0;
+	struct iscsi_logout *hdr;
+	struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
+
+	hdr			= (struct iscsi_logout *) buf;
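+	/* The low seven bits of byte 1 carry the logout reason code. */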
+	reason_code		= (hdr->flags & 0x7f);
+
+	if (tiqn) {
+		spin_lock(&tiqn->logout_stats.lock);
+		if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
+			tiqn->logout_stats.normal_logouts++;
+		else
+			tiqn->logout_stats.abnormal_logouts++;
+		spin_unlock(&tiqn->logout_stats.lock);
+	}
+
+	pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
+		" ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
+		hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
+		hdr->cid, conn->cid);
+
+	if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
+		pr_err("Received logout request on connection that"
+			" is not in logged in state, ignoring request.\n");
+		iscsit_free_cmd(cmd, false);
+		return 0;
+	}
+
+	cmd->iscsi_opcode       = ISCSI_OP_LOGOUT;
+	cmd->i_state            = ISTATE_SEND_LOGOUTRSP;
+	cmd->immediate_cmd      = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
+	cmd->targ_xfer_tag      = 0xFFFFFFFF;
+	cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
+	cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
+	cmd->logout_cid         = be16_to_cpu(hdr->cid);
+	cmd->logout_reason      = reason_code;
+	cmd->data_direction     = DMA_NONE;
+
+	/*
+	 * We need to sleep in these cases (by returning 1) until the Logout
+	 * Response gets sent in the tx thread.
+	 */
+	if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
+	   ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
+	    be16_to_cpu(hdr->cid) == conn->cid))
+		logout_remove = 1;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
+		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
+
+	/*
+	 * Immediate commands are executed, well, immediately.
+	 * Non-Immediate Logout Commands are executed in CmdSN order.
+	 */
+	if (cmd->immediate_cmd) {
+		int ret = iscsit_execute_cmd(cmd, 0);
+
+		if (ret < 0)
+			return ret;
+	} else {
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+			logout_remove = 0;
+		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
+	}
+
+	return logout_remove;
+}
+EXPORT_SYMBOL(iscsit_handle_logout_cmd);
+
+static int iscsit_handle_snack(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_snack *hdr;
+
+	hdr			= (struct iscsi_snack *) buf;
+	hdr->flags		&= ~ISCSI_FLAG_CMD_FINAL;
+
+	pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
+		" 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
+		" CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
+			hdr->begrun, hdr->runlength, conn->cid);
+
+	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+		pr_err("Initiator sent SNACK request while in"
+			" ErrorRecoveryLevel=0.\n");
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
+	}
+	/*
+	 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
+	 * call from inside iscsit_send_recovery_datain_or_r2t().
+	 */
+	switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
+	case 0:
+		return iscsit_handle_recovery_datain_or_r2t(conn, buf,
+			hdr->itt,
+			be32_to_cpu(hdr->ttt),
+			be32_to_cpu(hdr->begrun),
+			be32_to_cpu(hdr->runlength));
+	case ISCSI_FLAG_SNACK_TYPE_STATUS:
+		return iscsit_handle_status_snack(conn, hdr->itt,
+			be32_to_cpu(hdr->ttt),
+			be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
+	case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
+		return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
+			be32_to_cpu(hdr->begrun),
+			be32_to_cpu(hdr->runlength));
+	case ISCSI_FLAG_SNACK_TYPE_RDATA:
+		/* FIXME: Support R-Data SNACK */
+		pr_err("R-Data SNACK Not Supported.\n");
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
+	default:
+		pr_err("Unknown SNACK type 0x%02x, protocol"
+			" error.\n", hdr->flags & 0x0f);
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
+	}
+
+	return 0;
+}
+
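+/*
+ * If the TCP connection is already half-closed, give the initiator up to
+ * ISCSI_RX_THREAD_TCP_TIMEOUT seconds to complete the shutdown before the
+ * RX thread gives up on the socket.
+ */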
+static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+		wait_for_completion_interruptible_timeout(
+					&conn->rx_half_close_comp,
+					ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
+	}
+}
+
+static int iscsit_handle_immediate_data(
+	struct iscsi_cmd *cmd,
+	struct iscsi_scsi_req *hdr,
+	u32 length)
+{
+	int iov_ret, rx_got = 0, rx_size = 0;
+	u32 checksum, iov_count = 0, padding = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct kvec *iov;
+
+	iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
+	if (iov_ret < 0)
+		return IMMEDIATE_DATA_CANNOT_RECOVER;
+
+	rx_size = length;
+	iov_count = iov_ret;
+	iov = &cmd->iov_data[0];
+
+	padding = ((-length) & 3);
+	if (padding != 0) {
+		iov[iov_count].iov_base	= cmd->pad_bytes;
+		iov[iov_count++].iov_len = padding;
+		rx_size += padding;
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		iov[iov_count].iov_base		= &checksum;
+		iov[iov_count++].iov_len	= ISCSI_CRC_LEN;
+		rx_size += ISCSI_CRC_LEN;
+	}
+
+	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+	iscsit_unmap_iovec(cmd);
+
+	if (rx_got != rx_size) {
+		iscsit_rx_thread_wait_for_tcp(conn);
+		return IMMEDIATE_DATA_CANNOT_RECOVER;
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		u32 data_crc;
+
+		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+						    cmd->write_data_done, length, padding,
+						    cmd->pad_bytes);
+
+		if (checksum != data_crc) {
+			pr_err("ImmediateData CRC32C DataDigest 0x%08x"
+				" does not match computed 0x%08x\n", checksum,
+				data_crc);
+
+			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+				pr_err("Unable to recover from"
+					" Immediate Data digest failure while"
+					" in ERL=0.\n");
+				iscsit_reject_cmd(cmd,
+						ISCSI_REASON_DATA_DIGEST_ERROR,
+						(unsigned char *)hdr);
+				return IMMEDIATE_DATA_CANNOT_RECOVER;
+			} else {
+				iscsit_reject_cmd(cmd,
+						ISCSI_REASON_DATA_DIGEST_ERROR,
+						(unsigned char *)hdr);
+				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+			}
+		} else {
+			pr_debug("Got CRC32C DataDigest 0x%08x for"
+				" %u bytes of Immediate Data\n", checksum,
+				length);
+		}
+	}
+
+	cmd->write_data_done += length;
+
+	if (cmd->write_data_done == cmd->se_cmd.data_length) {
+		spin_lock_bh(&cmd->istate_lock);
+		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+		spin_unlock_bh(&cmd->istate_lock);
+	}
+
+	return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
+
+/*
+ *	Called with sess->conn_lock held.
+ */
+/* #warning iscsit_build_conn_drop_async_message() only sends out on connections
+	with an active network interface */
+static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd;
+	struct iscsi_conn *conn_p;
+	bool found = false;
+
+	/*
+	 * Only send an Asynchronous Message on connections whose network
+	 * interface is still functional.
+	 */
+	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
+		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
+			iscsit_inc_conn_usage_count(conn_p);
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return;
+
+	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
+	if (!cmd) {
+		iscsit_dec_conn_usage_count(conn_p);
+		return;
+	}
+
+	cmd->logout_cid = conn->cid;
+	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+	cmd->i_state = ISTATE_SEND_ASYNCMSG;
+
+	spin_lock_bh(&conn_p->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
+	spin_unlock_bh(&conn_p->cmd_lock);
+
+	iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
+	iscsit_dec_conn_usage_count(conn_p);
+}
+
+static int iscsit_send_conn_drop_async_message(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_async *hdr;
+
+	cmd->tx_size = ISCSI_HDR_LEN;
+	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+
+	hdr			= (struct iscsi_async *) cmd->pdu;
+	hdr->opcode		= ISCSI_OP_ASYNC_EVENT;
+	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
+	cmd->init_task_tag	= RESERVED_ITT;
+	cmd->targ_xfer_tag	= 0xFFFFFFFF;
+	put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+	hdr->async_event	= ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
+	hdr->param1		= cpu_to_be16(cmd->logout_cid);
+	hdr->param2		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
+	hdr->param3		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		cmd->tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest to"
+			" Async Message 0x%08x\n", *header_digest);
+	}
+
+	cmd->iov_misc[0].iov_base	= cmd->pdu;
+	cmd->iov_misc[0].iov_len	= cmd->tx_size;
+	cmd->iov_misc_count		= 1;
+
+	pr_debug("Sending Connection Dropped Async Message StatSN:"
+		" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
+			cmd->logout_cid, conn->cid);
+	return 0;
+}
+
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+		wait_for_completion_interruptible_timeout(
+					&conn->tx_half_close_comp,
+					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
+	}
+}
+
+static void
+iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+			struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
+			bool set_statsn)
+{
+	hdr->opcode		= ISCSI_OP_SCSI_DATA_IN;
+	hdr->flags		= datain->flags;
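+	/*
+	 * When this DataIN also carries SCSI status (phase collapse),
+	 * report residual over/underflow just as a separate SCSI Response
+	 * PDU would.
+	 */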
+	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+		}
+	}
+	hton24(hdr->dlength, datain->length);
+	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+				(struct scsi_lun *)&hdr->lun);
+	else
+		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+
+	hdr->itt		= cmd->init_task_tag;
+
+	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+		hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
+	else
+		hdr->ttt		= cpu_to_be32(0xFFFFFFFF);
+	if (set_statsn)
+		hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+	else
+		hdr->statsn		= cpu_to_be32(0xFFFFFFFF);
+
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+	hdr->datasn		= cpu_to_be32(datain->data_sn);
+	hdr->offset		= cpu_to_be32(datain->offset);
+
+	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
+		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
+		ntohl(hdr->offset), datain->length, conn->cid);
+}
+
+static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
+	struct iscsi_datain datain;
+	struct iscsi_datain_req *dr;
+	struct kvec *iov;
+	u32 iov_count = 0, tx_size = 0;
+	int eodr = 0, ret, iov_ret;
+	bool set_statsn = false;
+
+	memset(&datain, 0, sizeof(struct iscsi_datain));
+	dr = iscsit_get_datain_values(cmd, &datain);
+	if (!dr) {
+		pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
+				cmd->init_task_tag);
+		return -1;
+	}
+	/*
+	 * Be paranoid and double check the logic for now.
+	 */
+	if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
+		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
+			" datain.length: %u exceeds cmd->data_length: %u\n",
+			cmd->init_task_tag, datain.offset, datain.length,
+			cmd->se_cmd.data_length);
+		return -1;
+	}
+
+	atomic_long_add(datain.length, &conn->sess->tx_data_octets);
+	/*
+	 * Special case for successful execution w/ both DATAIN
+	 * and Sense Data.
+	 */
+	if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
+	    (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
+		datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
+	else {
+		if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
+		    (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
+			iscsit_increment_maxcmdsn(cmd, conn->sess);
+			cmd->stat_sn = conn->stat_sn++;
+			set_statsn = true;
+		} else if (dr->dr_complete ==
+			   DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
+			set_statsn = true;
+	}
+
+	iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
+
+	iov = &cmd->iov_data[0];
+	iov[iov_count].iov_base	= cmd->pdu;
+	iov[iov_count++].iov_len	= ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+
+		pr_debug("Attaching CRC32 HeaderDigest"
+			" for DataIN PDU 0x%08x\n", *header_digest);
+	}
+
+	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
+				datain.offset, datain.length);
+	if (iov_ret < 0)
+		return -1;
+
+	iov_count += iov_ret;
+	tx_size += datain.length;
+
+	cmd->padding = ((-datain.length) & 3);
+	if (cmd->padding) {
+		iov[iov_count].iov_base		= cmd->pad_bytes;
+		iov[iov_count++].iov_len	= cmd->padding;
+		tx_size += cmd->padding;
+
+		pr_debug("Attaching %u padding bytes\n",
+				cmd->padding);
+	}
+	if (conn->conn_ops->DataDigest) {
+		cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+			 datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
+
+		iov[iov_count].iov_base	= &cmd->data_crc;
+		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+
+		pr_debug("Attached CRC32C DataDigest %d bytes, crc"
+			" 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
+	}
+
+	cmd->iov_data_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	ret = iscsit_fe_sendpage_sg(cmd, conn);
+
+	iscsit_unmap_iovec(cmd);
+
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	if (dr->dr_complete) {
+		eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
+				2 : 1;
+		iscsit_free_datain_req(cmd, dr);
+	}
+
+	return eodr;
+}
+
+int
+iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+			struct iscsi_logout_rsp *hdr)
+{
+	struct iscsi_conn *logout_conn = NULL;
+	struct iscsi_conn_recovery *cr = NULL;
+	struct iscsi_session *sess = conn->sess;
+	/*
+	 * The actual shutting down of Sessions and/or Connections
+	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
+	 * is done in iscsit_logout_post_handler().
+	 */
+	switch (cmd->logout_reason) {
+	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+		pr_debug("iSCSI session logout successful, setting"
+			" logout response to ISCSI_LOGOUT_SUCCESS.\n");
+		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+		break;
+	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+		if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
+			break;
+		/*
+		 * For CLOSECONNECTION logout requests carrying
+		 * a matching logout CID -> local CID, the reference
+		 * for the local CID will have been incremented in
+		 * iscsit_logout_closeconnection().
+		 *
+		 * For CLOSECONNECTION logout requests carrying
+		 * a different CID than the connection it arrived
+		 * on, the connection responding to cmd->logout_cid
+		 * is stopped in iscsit_logout_post_handler_diffcid().
+		 */
+
+		pr_debug("iSCSI CID: %hu logout on CID: %hu"
+			" successful.\n", cmd->logout_cid, conn->cid);
+		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+		break;
+	case ISCSI_LOGOUT_REASON_RECOVERY:
+		if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
+		    (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
+			break;
+		/*
+		 * If the connection is still active from our point of view,
+		 * force connection recovery to occur.
+		 */
+		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
+				cmd->logout_cid);
+		if (logout_conn) {
+			iscsit_connection_reinstatement_rcfr(logout_conn);
+			iscsit_dec_conn_usage_count(logout_conn);
+		}
+
+		cr = iscsit_get_inactive_connection_recovery_entry(
+				conn->sess, cmd->logout_cid);
+		if (!cr) {
+			pr_err("Unable to locate CID: %hu for"
+			" REMOVECONNFORRECOVERY Logout Request.\n",
+				cmd->logout_cid);
+			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+			break;
+		}
+
+		iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
+
+		pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
+			" for recovery for CID: %hu on CID: %hu successful.\n",
+				cmd->logout_cid, conn->cid);
+		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+		break;
+	default:
+		pr_err("Unknown cmd->logout_reason: 0x%02x\n",
+				cmd->logout_reason);
+		return -1;
+	}
+
+	hdr->opcode		= ISCSI_OP_LOGOUT_RSP;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hdr->response		= cmd->logout_response;
+	hdr->itt		= cmd->init_task_tag;
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+
+	pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
+		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
+		cmd->init_task_tag, cmd->stat_sn, hdr->response,
+		cmd->logout_cid, conn->cid);
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_build_logout_rsp);
+
+static int
+iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct kvec *iov;
+	int niov = 0, tx_size, rc;
+
+	rc = iscsit_build_logout_rsp(cmd, conn,
+			(struct iscsi_logout_rsp *)&cmd->pdu[0]);
+	if (rc < 0)
+		return rc;
+
+	tx_size = ISCSI_HDR_LEN;
+	iov = &cmd->iov_misc[0];
+	iov[niov].iov_base	= cmd->pdu;
+	iov[niov++].iov_len	= ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest to"
+			" Logout Response 0x%08x\n", *header_digest);
+	}
+	cmd->iov_misc_count = niov;
+	cmd->tx_size = tx_size;
+
+	return 0;
+}
+
+void
+iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+		       struct iscsi_nopin *hdr, bool nopout_response)
+{
+	hdr->opcode		= ISCSI_OP_NOOP_IN;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hton24(hdr->dlength, cmd->buf_ptr_size);
+	if (nopout_response)
+		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+	hdr->itt		= cmd->init_task_tag;
+	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
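+	/*
+	 * Only a NopIN answering an initiator NopOUT advances StatSN; an
+	 * unsolicited NopIN carries the current StatSN without consuming it.
+	 */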
+	cmd->stat_sn		= (nopout_response) ? conn->stat_sn++ :
+				  conn->stat_sn;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	if (nopout_response)
+		iscsit_increment_maxcmdsn(cmd, conn->sess);
+
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+
+	pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
+		" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
+		"Solicitied" : "Unsolicitied", cmd->init_task_tag,
+		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
+}
+EXPORT_SYMBOL(iscsit_build_nopin_rsp);
+
+/*
+ *	Unsolicited NOPIN, either requesting a response or not.
+ */
+static int iscsit_send_unsolicited_nopin(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	int want_response)
+{
+	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
+	int tx_size = ISCSI_HDR_LEN, ret;
+
+	iscsit_build_nopin_rsp(cmd, conn, hdr, false);
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest to"
+			" NopIN 0x%08x\n", *header_digest);
+	}
+
+	cmd->iov_misc[0].iov_base	= cmd->pdu;
+	cmd->iov_misc[0].iov_len	= tx_size;
+	cmd->iov_misc_count	= 1;
+	cmd->tx_size		= tx_size;
+
+	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
+		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
+
+	ret = iscsit_send_tx_data(cmd, conn, 1);
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	spin_lock_bh(&cmd->istate_lock);
+	cmd->i_state = want_response ?
+		ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
+	spin_unlock_bh(&cmd->istate_lock);
+
+	return 0;
+}
+
+static int
+iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
+	struct kvec *iov;
+	u32 padding = 0;
+	int niov = 0, tx_size;
+
+	iscsit_build_nopin_rsp(cmd, conn, hdr, true);
+
+	tx_size = ISCSI_HDR_LEN;
+	iov = &cmd->iov_misc[0];
+	iov[niov].iov_base	= cmd->pdu;
+	iov[niov++].iov_len	= ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest"
+			" to NopIn 0x%08x\n", *header_digest);
+	}
+
+	/*
+	 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
+	 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
+	 */
+	if (cmd->buf_ptr_size) {
+		iov[niov].iov_base	= cmd->buf_ptr;
+		iov[niov++].iov_len	= cmd->buf_ptr_size;
+		tx_size += cmd->buf_ptr_size;
+
+		pr_debug("Echoing back %u bytes of ping"
+			" data.\n", cmd->buf_ptr_size);
+
+		padding = ((-cmd->buf_ptr_size) & 3);
+		if (padding != 0) {
+			iov[niov].iov_base = &cmd->pad_bytes;
+			iov[niov++].iov_len = padding;
+			tx_size += padding;
+			pr_debug("Attaching %u additional"
+				" padding bytes.\n", padding);
+		}
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				cmd->buf_ptr, cmd->buf_ptr_size,
+				padding, (u8 *)&cmd->pad_bytes,
+				(u8 *)&cmd->data_crc);
+
+			iov[niov].iov_base = &cmd->data_crc;
+			iov[niov++].iov_len = ISCSI_CRC_LEN;
+			tx_size += ISCSI_CRC_LEN;
+			pr_debug("Attached DataDigest for %u"
+				" bytes of ping data, CRC 0x%08x\n",
+				cmd->buf_ptr_size, cmd->data_crc);
+		}
+	}
+
+	cmd->iov_misc_count = niov;
+	cmd->tx_size = tx_size;
+
+	return 0;
+}
+
+static int iscsit_send_r2t(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	int tx_size = 0;
+	struct iscsi_r2t *r2t;
+	struct iscsi_r2t_rsp *hdr;
+	int ret;
+
+	r2t = iscsit_get_r2t_from_list(cmd);
+	if (!r2t)
+		return -1;
+
+	hdr			= (struct iscsi_r2t_rsp *) cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_R2T;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+			(struct scsi_lun *)&hdr->lun);
+	hdr->itt		= cmd->init_task_tag;
+	r2t->targ_xfer_tag	= session_get_next_ttt(conn->sess);
+	hdr->ttt		= cpu_to_be32(r2t->targ_xfer_tag);
+	hdr->statsn		= cpu_to_be32(conn->stat_sn);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+	hdr->r2tsn		= cpu_to_be32(r2t->r2t_sn);
+	hdr->data_offset	= cpu_to_be32(r2t->offset);
+	hdr->data_length	= cpu_to_be32(r2t->xfer_len);
+
+	cmd->iov_misc[0].iov_base	= cmd->pdu;
+	cmd->iov_misc[0].iov_len	= ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for R2T"
+			" PDU 0x%08x\n", *header_digest);
+	}
+
+	pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
+		" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
+		(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
+		r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
+			r2t->offset, r2t->xfer_len, conn->cid);
+
+	cmd->iov_misc_count = 1;
+	cmd->tx_size = tx_size;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	r2t->sent_r2t = 1;
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	ret = iscsit_send_tx_data(cmd, conn, 1);
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	iscsit_start_dataout_timer(cmd, conn);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+
+	return 0;
+}
+
+/*
+ *	@recovery: If called from iscsit_task_reassign_complete_write() for
+ *		connection recovery.
+ */
+int iscsit_build_r2ts_for_cmd(
+	struct iscsi_conn *conn,
+	struct iscsi_cmd *cmd,
+	bool recovery)
+{
+	int first_r2t = 1;
+	u32 offset = 0, xfer_len = 0;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
+		spin_unlock_bh(&cmd->r2t_lock);
+		return 0;
+	}
+
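+	/*
+	 * For in-order data transfer outside of recovery, never request
+	 * bytes that have already been received: resume R2Ts at whichever
+	 * is further, the recorded R2T offset or the data written so far.
+	 */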
+	if (conn->sess->sess_ops->DataSequenceInOrder &&
+	    !recovery)
+		cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
+
+	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			offset = cmd->r2t_offset;
+
+			if (first_r2t && recovery) {
+				int new_data_end = offset +
+					conn->sess->sess_ops->MaxBurstLength -
+					cmd->next_burst_len;
+
+				if (new_data_end > cmd->se_cmd.data_length)
+					xfer_len = cmd->se_cmd.data_length - offset;
+				else
+					xfer_len =
+						conn->sess->sess_ops->MaxBurstLength -
+						cmd->next_burst_len;
+			} else {
+				int new_data_end = offset +
+					conn->sess->sess_ops->MaxBurstLength;
+
+				if (new_data_end > cmd->se_cmd.data_length)
+					xfer_len = cmd->se_cmd.data_length - offset;
+				else
+					xfer_len = conn->sess->sess_ops->MaxBurstLength;
+			}
+			cmd->r2t_offset += xfer_len;
+
+			if (cmd->r2t_offset == cmd->se_cmd.data_length)
+				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+		} else {
+			struct iscsi_seq *seq;
+
+			seq = iscsit_get_seq_holder_for_r2t(cmd);
+			if (!seq) {
+				spin_unlock_bh(&cmd->r2t_lock);
+				return -1;
+			}
+
+			offset = seq->offset;
+			xfer_len = seq->xfer_len;
+
+			if (cmd->seq_send_order == cmd->seq_count)
+				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+		}
+		cmd->outstanding_r2ts++;
+		first_r2t = 0;
+
+		if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return -1;
+		}
+
+		if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
+			break;
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return 0;
+}
+
+void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+			bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
+{
+	if (inc_stat_sn)
+		cmd->stat_sn = conn->stat_sn++;
+
+	atomic_long_inc(&conn->sess->rsp_pdus);
+
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_SCSI_CMD_RSP;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
+		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+	}
+	hdr->response		= cmd->iscsi_response;
+	hdr->cmd_status		= cmd->se_cmd.scsi_status;
+	hdr->itt		= cmd->init_task_tag;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+
+	pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
+		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
+		cmd->init_task_tag, cmd->stat_sn, cmd->iscsi_response,
+		cmd->se_cmd.scsi_status, conn->cid);
+}
+EXPORT_SYMBOL(iscsit_build_rsp_pdu);
+
+static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
+	struct kvec *iov;
+	u32 padding = 0, tx_size = 0;
+	int iov_count = 0;
+	bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
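+	/*
+	 * A first-time status send advances StatSN, while a status
+	 * retransmission during recovery reuses the original StatSN.
+	 */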
+
+	iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
+
+	iov = &cmd->iov_misc[0];
+	iov[iov_count].iov_base	= cmd->pdu;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	/*
+	 * Attach SENSE DATA payload to iSCSI Response PDU
+	 */
+	if (cmd->se_cmd.sense_buffer &&
+	   ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+		put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
+		cmd->se_cmd.scsi_sense_length += sizeof (__be16);
+
+		padding		= -(cmd->se_cmd.scsi_sense_length) & 3;
+		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
+		iov[iov_count].iov_base	= cmd->sense_buffer;
+		iov[iov_count++].iov_len =
+				(cmd->se_cmd.scsi_sense_length + padding);
+		tx_size += cmd->se_cmd.scsi_sense_length;
+
+		if (padding) {
+			memset(cmd->sense_buffer +
+				cmd->se_cmd.scsi_sense_length, 0, padding);
+			tx_size += padding;
+			pr_debug("Adding %u bytes of padding to"
+				" SENSE.\n", padding);
+		}
+
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				cmd->sense_buffer,
+				(cmd->se_cmd.scsi_sense_length + padding),
+				0, NULL, (u8 *)&cmd->data_crc);
+
+			iov[iov_count].iov_base    = &cmd->data_crc;
+			iov[iov_count++].iov_len     = ISCSI_CRC_LEN;
+			tx_size += ISCSI_CRC_LEN;
+
+			pr_debug("Attaching CRC32 DataDigest for"
+				" SENSE, %u bytes CRC 0x%08x\n",
+				(cmd->se_cmd.scsi_sense_length + padding),
+				cmd->data_crc);
+		}
+
+		pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
+				" Response PDU\n",
+				cmd->se_cmd.scsi_sense_length);
+	}
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for Response"
+				" PDU 0x%08x\n", *header_digest);
+	}
+
+	cmd->iov_misc_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	return 0;
+}
+
+static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
+{
+	switch (se_tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		return ISCSI_TMF_RSP_COMPLETE;
+	case TMR_TASK_DOES_NOT_EXIST:
+		return ISCSI_TMF_RSP_NO_TASK;
+	case TMR_LUN_DOES_NOT_EXIST:
+		return ISCSI_TMF_RSP_NO_LUN;
+	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+		return ISCSI_TMF_RSP_NOT_SUPPORTED;
+	case TMR_FUNCTION_REJECTED:
+	default:
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+}
+
+void
+iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+			  struct iscsi_tm_rsp *hdr)
+{
+	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+
+	hdr->opcode		= ISCSI_OP_SCSI_TMFUNC_RSP;
+	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
+	hdr->response		= iscsit_convert_tcm_tmr_rsp(se_tmr);
+	hdr->itt		= cmd->init_task_tag;
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+
+	pr_debug("Built Task Management Response ITT: 0x%08x,"
+		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
+		cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
+}
+EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
+
+static int
+iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
+	u32 tx_size = 0;
+
+	iscsit_build_task_mgt_rsp(cmd, conn, hdr);
+
+	cmd->iov_misc[0].iov_base	= cmd->pdu;
+	cmd->iov_misc[0].iov_len	= ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for Task"
+			" Mgmt Response PDU 0x%08x\n", *header_digest);
+	}
+
+	cmd->iov_misc_count = 1;
+	cmd->tx_size = tx_size;
+
+	return 0;
+}
+
+static bool iscsit_check_inaddr_any(struct iscsi_np *np)
+{
+	bool ret = false;
+
+	if (np->np_sockaddr.ss_family == AF_INET6) {
+		const struct sockaddr_in6 sin6 = {
+			.sin6_addr = IN6ADDR_ANY_INIT };
+		struct sockaddr_in6 *sock_in6 =
+			 (struct sockaddr_in6 *)&np->np_sockaddr;
+
+		if (!memcmp(sock_in6->sin6_addr.s6_addr,
+				sin6.sin6_addr.s6_addr, 16))
+			ret = true;
+	} else {
+		struct sockaddr_in *sock_in =
+			(struct sockaddr_in *)&np->np_sockaddr;
+
+		if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY))
+			ret = true;
+	}
+
+	return ret;
+}
+
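+/*
+ * Cap a single SendTargets text response; listings larger than this are
+ * returned across multiple Text Response PDUs using the Continue bit.
+ */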
+#define SENDTARGETS_BUF_LIMIT 32768U
+
+static int
+iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+				  enum iscsit_transport_type network_transport,
+				  int skip_bytes, bool *completed)
+{
+	char *payload = NULL;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tiqn *tiqn;
+	struct iscsi_tpg_np *tpg_np;
+	int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+	int target_name_printed;
+	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
+	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
+	bool active;
+
+	buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
+			 SENDTARGETS_BUF_LIMIT);
+
+	payload = kzalloc(buffer_len, GFP_KERNEL);
+	if (!payload) {
+		pr_err("Unable to allocate memory for sendtargets"
+				" response.\n");
+		return -ENOMEM;
+	}
+	/*
+	 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
+	 * explicit case..
+	 */
+	if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
+		text_ptr = strchr(text_in, '=');
+		if (!text_ptr) {
+			pr_err("Unable to locate '=' string in text_in:"
+			       " %s\n", text_in);
+			kfree(payload);
+			return -EINVAL;
+		}
+		/*
+		 * Skip over '=' character..
+		 */
+		text_ptr += 1;
+	}
+
+	spin_lock(&tiqn_lock);
+	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+		if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
+		     strcmp(tiqn->tiqn, text_ptr)) {
+			continue;
+		}
+
+		target_name_printed = 0;
+
+		spin_lock(&tiqn->tiqn_tpg_lock);
+		list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+			/* If demo_mode_discovery=0 and generate_node_acls=0
+			 * (demo mode disabled) do not return
+			 * TargetName+TargetAddress unless a NodeACL exists.
+			 */
+
+			if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+			    (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+			    (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
+				cmd->conn->sess->sess_ops->InitiatorName))) {
+				continue;
+			}
+
+			spin_lock(&tpg->tpg_state_lock);
+			active = (tpg->tpg_state == TPG_STATE_ACTIVE);
+			spin_unlock(&tpg->tpg_state_lock);
+
+			if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
+				continue;
+
+			spin_lock(&tpg->tpg_np_lock);
+			list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
+						tpg_np_list) {
+				struct iscsi_np *np = tpg_np->tpg_np;
+				bool inaddr_any = iscsit_check_inaddr_any(np);
+				struct sockaddr_storage *sockaddr;
+
+				if (np->np_network_transport != network_transport)
+					continue;
+
+				if (!target_name_printed) {
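+					/*
+					 * The sprintf length is incremented
+					 * below so the NUL terminating each
+					 * key=value pair is transmitted as
+					 * the text key separator.
+					 */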
+					len = sprintf(buf, "TargetName=%s",
+						      tiqn->tiqn);
+					len += 1;
+
+					if ((len + payload_len) > buffer_len) {
+						spin_unlock(&tpg->tpg_np_lock);
+						spin_unlock(&tiqn->tiqn_tpg_lock);
+						end_of_buf = 1;
+						goto eob;
+					}
+
+					if (skip_bytes && len <= skip_bytes) {
+						skip_bytes -= len;
+					} else {
+						memcpy(payload + payload_len, buf, len);
+						payload_len += len;
+						target_name_printed = 1;
+						if (len > skip_bytes)
+							skip_bytes = 0;
+					}
+				}
+
+				if (inaddr_any)
+					sockaddr = &conn->local_sockaddr;
+				else
+					sockaddr = &np->np_sockaddr;
+
+				len = sprintf(buf, "TargetAddress="
+					      "%pISpc,%hu",
+					      sockaddr,
+					      tpg->tpgt);
+				len += 1;
+
+				if ((len + payload_len) > buffer_len) {
+					spin_unlock(&tpg->tpg_np_lock);
+					spin_unlock(&tiqn->tiqn_tpg_lock);
+					end_of_buf = 1;
+					goto eob;
+				}
+
+				if (skip_bytes && len <= skip_bytes) {
+					skip_bytes -= len;
+				} else {
+					memcpy(payload + payload_len, buf, len);
+					payload_len += len;
+					if (len > skip_bytes)
+						skip_bytes = 0;
+				}
+			}
+			spin_unlock(&tpg->tpg_np_lock);
+		}
+		spin_unlock(&tiqn->tiqn_tpg_lock);
+eob:
+		if (end_of_buf) {
+			*completed = false;
+			break;
+		}
+
+		if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
+			break;
+	}
+	spin_unlock(&tiqn_lock);
+
+	cmd->buf_ptr = payload;
+
+	return payload_len;
+}
+
+int
+iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+		      struct iscsi_text_rsp *hdr,
+		      enum iscsit_transport_type network_transport)
+{
+	int text_length, padding;
+	bool completed = true;
+
+	text_length = iscsit_build_sendtargets_response(cmd, network_transport,
+							cmd->read_data_done,
+							&completed);
+	if (text_length < 0)
+		return text_length;
+
+	if (completed) {
+		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+	} else {
+		hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+		cmd->read_data_done += text_length;
+		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+	}
+	hdr->opcode = ISCSI_OP_TEXT_RSP;
+	padding = ((-text_length) & 3);
+	hton24(hdr->dlength, text_length);
+	hdr->itt = cmd->init_task_tag;
+	hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+	cmd->stat_sn = conn->stat_sn++;
+	hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	/*
+	 * Reset maxcmdsn_inc in multi-part text payload exchanges to
+	 * correctly increment MaxCmdSN for each response answering a
+	 * non-immediate text request with a valid CmdSN.
+	 */
+	cmd->maxcmdsn_inc = 0;
+	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+
+	pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
+		" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
+		cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
+		!!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
+		!!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
+
+	return text_length + padding;
+}
+EXPORT_SYMBOL(iscsit_build_text_rsp);
+
+static int iscsit_send_text_rsp(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
+	struct kvec *iov;
+	u32 tx_size = 0;
+	int text_length, iov_count = 0, rc;
+
+	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
+	if (rc < 0)
+		return rc;
+
+	text_length = rc;
+	iov = &cmd->iov_misc[0];
+	iov[iov_count].iov_base = cmd->pdu;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+	iov[iov_count].iov_base	= cmd->buf_ptr;
+	iov[iov_count++].iov_len = text_length;
+
+	tx_size += (ISCSI_HDR_LEN + text_length);
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for"
+			" Text Response PDU 0x%08x\n", *header_digest);
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				cmd->buf_ptr, text_length,
+				0, NULL, (u8 *)&cmd->data_crc);
+
+		iov[iov_count].iov_base	= &cmd->data_crc;
+		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+		tx_size	+= ISCSI_CRC_LEN;
+
+		pr_debug("Attaching DataDigest for %u bytes of text"
+			" data, CRC 0x%08x\n", text_length,
+			cmd->data_crc);
+	}
+
+	cmd->iov_misc_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	return 0;
+}
+
+void
+iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+		    struct iscsi_reject *hdr)
+{
+	hdr->opcode		= ISCSI_OP_REJECT;
+	hdr->reason		= cmd->reject_reason;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hton24(hdr->dlength, ISCSI_HDR_LEN);
+	hdr->ffffffff		= cpu_to_be32(0xffffffff);
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+}
+EXPORT_SYMBOL(iscsit_build_reject);
+
+static int iscsit_send_reject(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
+	struct kvec *iov;
+	u32 iov_count = 0, tx_size;
+
+	iscsit_build_reject(cmd, conn, hdr);
+
+	iov = &cmd->iov_misc[0];
+	iov[iov_count].iov_base = cmd->pdu;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+	iov[iov_count].iov_base = cmd->buf_ptr;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+
+	tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for"
+			" REJECT PDU 0x%08x\n", *header_digest);
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+				ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
+
+		iov[iov_count].iov_base = &cmd->data_crc;
+		iov[iov_count++].iov_len  = ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 DataDigest for REJECT"
+				" PDU 0x%08x\n", cmd->data_crc);
+	}
+
+	cmd->iov_misc_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
+		" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
+
+	return 0;
+}
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+	int ord, cpu;
+	/*
+	 * bitmap_id is assigned from iscsit_global->ts_bitmap from
+	 * within iscsit_start_kthreads()
+	 *
+	 * Here we use bitmap_id to determine which CPU this
+	 * iSCSI connection's RX/TX threads will be scheduled to
+	 * execute upon.
+	 */
+	ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
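+	/*
+	 * Round-robin over the online CPUs: e.g. with four CPUs online,
+	 * bitmap_id 5 yields ord 1 and selects the second online CPU.
+	 */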
+	for_each_online_cpu(cpu) {
+		if (ord-- == 0) {
+			cpumask_set_cpu(cpu, conn->conn_cpumask);
+			return;
+		}
+	}
+	/*
+	 * This should never be reached..
+	 */
+	dump_stack();
+	cpumask_setall(conn->conn_cpumask);
+}
+
+static inline void iscsit_thread_check_cpumask(
+	struct iscsi_conn *conn,
+	struct task_struct *p,
+	int mode)
+{
+	/*
+	 * mode == 1 signals iscsi_target_tx_thread() usage.
+	 * mode == 0 signals iscsi_target_rx_thread() usage.
+	 */
+	if (mode == 1) {
+		if (!conn->conn_tx_reset_cpumask)
+			return;
+		conn->conn_tx_reset_cpumask = 0;
+	} else {
+		if (!conn->conn_rx_reset_cpumask)
+			return;
+		conn->conn_rx_reset_cpumask = 0;
+	}
+	/*
+	 * Update the CPU mask for this single kthread so that
+	 * both TX and RX kthreads are scheduled to run on the
+	 * same CPU.
+	 */
+	set_cpus_allowed_ptr(p, conn->conn_cpumask);
+}
+
+static int
+iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+{
+	int ret;
+
+	switch (state) {
+	case ISTATE_SEND_R2T:
+		ret = iscsit_send_r2t(cmd, conn);
+		if (ret < 0)
+			goto err;
+		break;
+	case ISTATE_REMOVE:
+		spin_lock_bh(&conn->cmd_lock);
+		list_del_init(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		iscsit_free_cmd(cmd, false);
+		break;
+	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+		iscsit_mod_nopin_response_timer(conn);
+		ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
+		if (ret < 0)
+			goto err;
+		break;
+	case ISTATE_SEND_NOPIN_NO_RESPONSE:
+		ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
+		if (ret < 0)
+			goto err;
+		break;
+	default:
+		pr_err("Unknown Opcode: 0x%02x ITT:"
+		       " 0x%08x, i_state: %d on CID: %hu\n",
+		       cmd->iscsi_opcode, cmd->init_task_tag, state,
+		       conn->cid);
+		goto err;
+	}
+
+	return 0;
+
+err:
+	return -1;
+}
+
+static int
+iscsit_handle_immediate_queue(struct iscsi_conn *conn)
+{
+	struct iscsit_transport *t = conn->conn_transport;
+	struct iscsi_queue_req *qr;
+	struct iscsi_cmd *cmd;
+	u8 state;
+	int ret;
+
+	while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
+		atomic_set(&conn->check_immediate_queue, 0);
+		cmd = qr->cmd;
+		state = qr->state;
+		kmem_cache_free(lio_qr_cache, qr);
+
+		ret = t->iscsit_immediate_queue(conn, cmd, state);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+{
+	int ret;
+
+check_rsp_state:
+	switch (state) {
+	case ISTATE_SEND_DATAIN:
+		ret = iscsit_send_datain(cmd, conn);
+		if (ret < 0)
+			goto err;
+		else if (!ret)
+			/* more drs */
+			goto check_rsp_state;
+		else if (ret == 1) {
+			/* all done */
+			spin_lock_bh(&cmd->istate_lock);
+			cmd->i_state = ISTATE_SENT_STATUS;
+			spin_unlock_bh(&cmd->istate_lock);
+
+			if (atomic_read(&conn->check_immediate_queue))
+				return 1;
+
+			return 0;
+		} else if (ret == 2) {
+			/* Still must send status,
+			   SCF_TRANSPORT_TASK_SENSE was set */
+			spin_lock_bh(&cmd->istate_lock);
+			cmd->i_state = ISTATE_SEND_STATUS;
+			spin_unlock_bh(&cmd->istate_lock);
+			state = ISTATE_SEND_STATUS;
+			goto check_rsp_state;
+		}
+
+		break;
+	case ISTATE_SEND_STATUS:
+	case ISTATE_SEND_STATUS_RECOVERY:
+		ret = iscsit_send_response(cmd, conn);
+		break;
+	case ISTATE_SEND_LOGOUTRSP:
+		ret = iscsit_send_logout(cmd, conn);
+		break;
+	case ISTATE_SEND_ASYNCMSG:
+		ret = iscsit_send_conn_drop_async_message(
+			cmd, conn);
+		break;
+	case ISTATE_SEND_NOPIN:
+		ret = iscsit_send_nopin(cmd, conn);
+		break;
+	case ISTATE_SEND_REJECT:
+		ret = iscsit_send_reject(cmd, conn);
+		break;
+	case ISTATE_SEND_TASKMGTRSP:
+		ret = iscsit_send_task_mgt_rsp(cmd, conn);
+		if (ret != 0)
+			break;
+		ret = iscsit_tmr_post_handler(cmd, conn);
+		if (ret != 0)
+			iscsit_fall_back_to_erl0(conn->sess);
+		break;
+	case ISTATE_SEND_TEXTRSP:
+		ret = iscsit_send_text_rsp(cmd, conn);
+		break;
+	default:
+		pr_err("Unknown Opcode: 0x%02x ITT:"
+		       " 0x%08x, i_state: %d on CID: %hu\n",
+		       cmd->iscsi_opcode, cmd->init_task_tag,
+		       state, conn->cid);
+		goto err;
+	}
+	if (ret < 0)
+		goto err;
+
+	if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		iscsit_unmap_iovec(cmd);
+		goto err;
+	}
+	iscsit_unmap_iovec(cmd);
+
+	switch (state) {
+	case ISTATE_SEND_LOGOUTRSP:
+		if (!iscsit_logout_post_handler(cmd, conn))
+			return -ECONNRESET;
+		/* fall through */
+	case ISTATE_SEND_STATUS:
+	case ISTATE_SEND_ASYNCMSG:
+	case ISTATE_SEND_NOPIN:
+	case ISTATE_SEND_STATUS_RECOVERY:
+	case ISTATE_SEND_TEXTRSP:
+	case ISTATE_SEND_TASKMGTRSP:
+	case ISTATE_SEND_REJECT:
+		spin_lock_bh(&cmd->istate_lock);
+		cmd->i_state = ISTATE_SENT_STATUS;
+		spin_unlock_bh(&cmd->istate_lock);
+		break;
+	default:
+		pr_err("Unknown Opcode: 0x%02x ITT:"
+		       " 0x%08x, i_state: %d on CID: %hu\n",
+		       cmd->iscsi_opcode, cmd->init_task_tag,
+		       cmd->i_state, conn->cid);
+		goto err;
+	}
+
+	if (atomic_read(&conn->check_immediate_queue))
+		return 1;
+
+	return 0;
+
+err:
+	return -1;
+}
+
+static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+{
+	struct iscsit_transport *t = conn->conn_transport;
+	struct iscsi_queue_req *qr;
+	struct iscsi_cmd *cmd;
+	u8 state;
+	int ret;
+
+	while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
+		cmd = qr->cmd;
+		state = qr->state;
+		kmem_cache_free(lio_qr_cache, qr);
+
+		ret = t->iscsit_response_queue(conn, cmd, state);
+		if (ret == 1 || ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
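+/*
+ * Per-connection TX thread.  Immediate queue work (R2Ts, NopIns) is always
+ * drained first; a return of 1 from the response queue path means new
+ * immediate work arrived mid-drain, so jump back and service it before
+ * sending further responses.
+ */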
+int iscsi_target_tx_thread(void *arg)
+{
+	int ret = 0;
+	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
+
+	/*
+	 * Allow ourselves to be interrupted by SIGINT so that a
+	 * connection recovery / failure event can be triggered externally.
+	 */
+	allow_signal(SIGINT);
+
+	while (!kthread_should_stop()) {
+		/*
+		 * Ensure that both TX and RX per connection kthreads
+		 * are scheduled to run on the same CPU.
+		 */
+		iscsit_thread_check_cpumask(conn, current, 1);
+
+		wait_event_interruptible(conn->queues_wq,
+					 !iscsit_conn_all_queues_empty(conn));
+
+		if (signal_pending(current))
+			goto transport_err;
+
+get_immediate:
+		ret = iscsit_handle_immediate_queue(conn);
+		if (ret < 0)
+			goto transport_err;
+
+		ret = iscsit_handle_response_queue(conn);
+		if (ret == 1) {
+			goto get_immediate;
+		} else if (ret == -ECONNRESET) {
+			conn_freed = true;
+			goto out;
+		} else if (ret < 0) {
+			goto transport_err;
+		}
+	}
+
+transport_err:
+	/*
+	 * Avoid the normal connection failure code-path if this connection
+	 * is still within LOGIN mode, and iscsi_np process context is
+	 * responsible for cleaning up the early connection failure.
+	 */
+	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+		iscsit_take_action_for_connection_exit(conn, &conn_freed);
+out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
+	return 0;
+}
+
+static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
+{
+	struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
+	struct iscsi_cmd *cmd;
+	int ret = 0;
+
+	switch (hdr->opcode & ISCSI_OPCODE_MASK) {
+	case ISCSI_OP_SCSI_CMD:
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+		if (!cmd)
+			goto reject;
+
+		ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
+		break;
+	case ISCSI_OP_SCSI_DATA_OUT:
+		ret = iscsit_handle_data_out(conn, buf);
+		break;
+	case ISCSI_OP_NOOP_OUT:
+		cmd = NULL;
+		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+			if (!cmd)
+				goto reject;
+		}
+		ret = iscsit_handle_nop_out(conn, cmd, buf);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+		if (!cmd)
+			goto reject;
+
+		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
+		break;
+	case ISCSI_OP_TEXT:
+		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+			if (!cmd)
+				goto reject;
+		} else {
+			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+			if (!cmd)
+				goto reject;
+		}
+
+		ret = iscsit_handle_text_cmd(conn, cmd, buf);
+		break;
+	case ISCSI_OP_LOGOUT:
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+		if (!cmd)
+			goto reject;
+
+		ret = iscsit_handle_logout_cmd(conn, cmd, buf);
+		if (ret > 0)
+			wait_for_completion_timeout(&conn->conn_logout_comp,
+					SECONDS_FOR_LOGOUT_COMP * HZ);
+		break;
+	case ISCSI_OP_SNACK:
+		ret = iscsit_handle_snack(conn, buf);
+		break;
+	default:
+		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
+		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+			pr_err("Cannot recover from unknown"
+			" opcode while ERL=0, closing iSCSI connection.\n");
+			return -1;
+		}
+		pr_err("Unable to recover from unknown opcode while OFMarker=No,"
+		       " closing iSCSI connection.\n");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+reject:
+	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+}
+
+static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+{
+	bool ret;
+
+	spin_lock_bh(&conn->state_lock);
+	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
+	spin_unlock_bh(&conn->state_lock);
+
+	return ret;
+}
+
+int iscsi_target_rx_thread(void *arg)
+{
+	int ret, rc;
+	u8 buffer[ISCSI_HDR_LEN], opcode;
+	u32 checksum = 0, digest = 0;
+	struct iscsi_conn *conn = arg;
+	struct kvec iov;
+	bool conn_freed = false;
+	/*
+	 * Allow ourselves to be interrupted by SIGINT so that a
+	 * connection recovery / failure event can be triggered externally.
+	 */
+	allow_signal(SIGINT);
+	/*
+	 * Wait for iscsi_post_login_handler() to complete before allowing
+	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
+	 */
+	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+	if (rc < 0 || iscsi_target_check_conn_state(conn))
+		goto out;
+
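+	/*
+	 * The iser transport receives PDUs in its own context, so there is
+	 * no socket for this thread to read from.  Park on a private
+	 * completion that can only be broken by a SIGINT from connection
+	 * teardown, then take the normal exit path either way.
+	 */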
+	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+		struct completion comp;
+
+		init_completion(&comp);
+		rc = wait_for_completion_interruptible(&comp);
+		if (rc < 0)
+			goto transport_err;
+
+		goto transport_err;
+	}
+
+	while (!kthread_should_stop()) {
+		/*
+		 * Ensure that both TX and RX per connection kthreads
+		 * are scheduled to run on the same CPU.
+		 */
+		iscsit_thread_check_cpumask(conn, current, 0);
+
+		memset(buffer, 0, ISCSI_HDR_LEN);
+		memset(&iov, 0, sizeof(struct kvec));
+
+		iov.iov_base	= buffer;
+		iov.iov_len	= ISCSI_HDR_LEN;
+
+		ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+		if (ret != ISCSI_HDR_LEN) {
+			iscsit_rx_thread_wait_for_tcp(conn);
+			goto transport_err;
+		}
+
+		if (conn->conn_ops->HeaderDigest) {
+			iov.iov_base	= &digest;
+			iov.iov_len	= ISCSI_CRC_LEN;
+
+			ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+			if (ret != ISCSI_CRC_LEN) {
+				iscsit_rx_thread_wait_for_tcp(conn);
+				goto transport_err;
+			}
+
+			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+					buffer, ISCSI_HDR_LEN,
+					0, NULL, (u8 *)&checksum);
+
+			if (digest != checksum) {
+				pr_err("HeaderDigest CRC32C failed,"
+					" received 0x%08x, computed 0x%08x\n",
+					digest, checksum);
+				/*
+				 * Set the PDU to 0xff so it will intentionally
+				 * hit default in the switch below.
+				 */
+				memset(buffer, 0xff, ISCSI_HDR_LEN);
+				atomic_long_inc(&conn->sess->conn_digest_errors);
+			} else {
+				pr_debug("Got HeaderDigest CRC32C"
+						" 0x%08x\n", checksum);
+			}
+		}
+
+		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+			goto transport_err;
+
+		opcode = buffer[0] & ISCSI_OPCODE_MASK;
+
+		/*
+		 * Only Text and Logout requests are legal within a
+		 * Discovery session.
+		 */
+		if (conn->sess->sess_ops->SessionType &&
+		    opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT) {
+			pr_err("Received illegal iSCSI Opcode: 0x%02x"
+			" while in Discovery Session, rejecting.\n", opcode);
+			iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					  buffer);
+			goto transport_err;
+		}
+
+		ret = iscsi_target_rx_opcode(conn, buffer);
+		if (ret < 0)
+			goto transport_err;
+	}
+
+transport_err:
+	if (!signal_pending(current))
+		atomic_set(&conn->transport_failed, 1);
+	iscsit_take_action_for_connection_exit(conn, &conn_freed);
+out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
+	return 0;
+}
+
+static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+{
+	LIST_HEAD(tmp_list);
+	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
+	struct iscsi_session *sess = conn->sess;
+	/*
+	 * We expect this function to only ever be called from either RX or
+	 * TX thread context via iscsit_close_connection(), once the other
+	 * context has been reset and returned to its sleeping pre-handler
+	 * state.
+	 */
+	spin_lock_bh(&conn->cmd_lock);
+	list_splice_init(&conn->conn_cmd_list, &tmp_list);
+
+	list_for_each_entry(cmd, &tmp_list, i_conn_node) {
+		struct se_cmd *se_cmd = &cmd->se_cmd;
+
+		if (se_cmd->se_tfo != NULL) {
+			spin_lock(&se_cmd->t_state_lock);
+			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+			spin_unlock(&se_cmd->t_state_lock);
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
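+	/*
+	 * Second pass, off the lock: unlink and free each command.  Having
+	 * marked CMD_T_FABRIC_STOP under the lock first means a concurrent
+	 * TMR ABORT_TASK observes the fabric-stop state rather than racing
+	 * with the free below.
+	 */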
+	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+		list_del_init(&cmd->i_conn_node);
+
+		iscsit_increment_maxcmdsn(cmd, sess);
+		iscsit_free_cmd(cmd, true);
+
+	}
+}
+
+static void iscsit_stop_timers_for_cmds(
+	struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+		if (cmd->data_direction == DMA_TO_DEVICE)
+			iscsit_stop_dataout_timer(cmd);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+}
+
+int iscsit_close_connection(
+	struct iscsi_conn *conn)
+{
+	int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
+	struct iscsi_session	*sess = conn->sess;
+
+	pr_debug("Closing iSCSI connection CID %hu on SID:"
+		" %u\n", conn->cid, sess->sid);
+	/*
+	 * Always up conn_logout_comp for the traditional TCP case just in case
+	 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
+	 * response never got sent because the connection failed.
+	 *
+	 * However for iser-target, isert_wait4logout() is using conn_logout_comp
+	 * to signal logout response TX interrupt completion.  Go ahead and skip
+	 * this for iser since isert_rx_opcode() does not wait on logout failure,
+	 * and to avoid iscsi_conn pointer dereference in iser-target code.
+	 */
+	if (conn->conn_transport->transport_type == ISCSI_TCP)
+		complete(&conn->conn_logout_comp);
+
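+	/*
+	 * Whichever thread reached teardown first stops its peer.  The
+	 * cmpxchg() on the *_thread_active flags guarantees the SIGINT +
+	 * kthread_stop() pair is issued at most once per thread.
+	 */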
+	if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
+		if (conn->tx_thread &&
+		    cmpxchg(&conn->tx_thread_active, true, false)) {
+			send_sig(SIGINT, conn->tx_thread, 1);
+			kthread_stop(conn->tx_thread);
+		}
+	} else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
+		if (conn->rx_thread &&
+		    cmpxchg(&conn->rx_thread_active, true, false)) {
+			send_sig(SIGINT, conn->rx_thread, 1);
+			kthread_stop(conn->rx_thread);
+		}
+	}
+
+	spin_lock(&iscsit_global->ts_bitmap_lock);
+	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+			      get_order(1));
+	spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+	iscsit_stop_timers_for_cmds(conn);
+	iscsit_stop_nopin_response_timer(conn);
+	iscsit_stop_nopin_timer(conn);
+
+	if (conn->conn_transport->iscsit_wait_conn)
+		conn->conn_transport->iscsit_wait_conn(conn);
+
+	/*
+	 * During connection recovery, drop unacknowledged out-of-order
+	 * commands for this connection, and prepare the other commands
+	 * for re-allegiance.
+	 *
+	 * During normal operation clear the out-of-order commands (but
+	 * do not free the struct iscsi_ooo_cmdsn's) and release all
+	 * struct iscsi_cmds.
+	 */
+	if (atomic_read(&conn->connection_recovery)) {
+		iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
+		iscsit_prepare_cmds_for_realligance(conn);
+	} else {
+		iscsit_clear_ooo_cmdsns_for_conn(conn);
+		iscsit_release_commands_from_conn(conn);
+	}
+	iscsit_free_queue_reqs_for_conn(conn);
+
+	/*
+	 * Handle decrementing session or connection usage count if
+	 * a logout response was not able to be sent because the
+	 * connection failed.  Fall back to Session Recovery here.
+	 */
+	if (atomic_read(&conn->conn_logout_remove)) {
+		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
+			iscsit_dec_conn_usage_count(conn);
+			iscsit_dec_session_usage_count(sess);
+		}
+		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
+			iscsit_dec_conn_usage_count(conn);
+
+		atomic_set(&conn->conn_logout_remove, 0);
+		atomic_set(&sess->session_reinstatement, 0);
+		atomic_set(&sess->session_fall_back_to_erl0, 1);
+	}
+
+	spin_lock_bh(&sess->conn_lock);
+	list_del(&conn->conn_list);
+
+	/*
+	 * Attempt to let the Initiator know this connection failed by
+	 * sending a Connection Dropped Async Message on another
+	 * active connection.
+	 */
+	if (atomic_read(&conn->connection_recovery))
+		iscsit_build_conn_drop_async_message(conn);
+
+	spin_unlock_bh(&sess->conn_lock);
+
+	/*
+	 * If connection reinstatement is being performed on this connection,
+	 * up the connection reinstatement semaphore that is being blocked on
+	 * in iscsit_cause_connection_reinstatement().
+	 */
+	spin_lock_bh(&conn->state_lock);
+	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
+		spin_unlock_bh(&conn->state_lock);
+		complete(&conn->conn_wait_comp);
+		wait_for_completion(&conn->conn_post_wait_comp);
+		spin_lock_bh(&conn->state_lock);
+	}
+
+	/*
+	 * If connection reinstatement is being performed on this connection
+	 * by receiving a REMOVECONNFORRECOVERY logout request, up the
+	 * connection wait rcfr semaphore that is being blocked on in
+	 * iscsit_connection_reinstatement_rcfr().
+	 */
+	if (atomic_read(&conn->connection_wait_rcfr)) {
+		spin_unlock_bh(&conn->state_lock);
+		complete(&conn->conn_wait_rcfr_comp);
+		wait_for_completion(&conn->conn_post_wait_comp);
+		spin_lock_bh(&conn->state_lock);
+	}
+	atomic_set(&conn->connection_reinstatement, 1);
+	spin_unlock_bh(&conn->state_lock);
+
+	/*
+	 * If any other processes are accessing this connection pointer we
+	 * must wait until they have completed.
+	 */
+	iscsit_check_conn_usage_count(conn);
+
+	if (conn->conn_rx_hash.tfm)
+		crypto_free_hash(conn->conn_rx_hash.tfm);
+	if (conn->conn_tx_hash.tfm)
+		crypto_free_hash(conn->conn_tx_hash.tfm);
+
+	free_cpumask_var(conn->conn_cpumask);
+
+	kfree(conn->conn_ops);
+	conn->conn_ops = NULL;
+
+	if (conn->sock)
+		sock_release(conn->sock);
+
+	if (conn->conn_transport->iscsit_free_conn)
+		conn->conn_transport->iscsit_free_conn(conn);
+
+	iscsit_put_transport(conn->conn_transport);
+
+	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+	conn->conn_state = TARG_CONN_STATE_FREE;
+	kfree(conn);
+
+	spin_lock_bh(&sess->conn_lock);
+	atomic_dec(&sess->nconn);
+	pr_debug("Decremented iSCSI connection count to %hu from node:"
+		" %s\n", atomic_read(&sess->nconn),
+		sess->sess_ops->InitiatorName);
+	/*
+	 * Make sure that if one connection fails in a non ERL=2 iSCSI
+	 * Session, they all fail.
+	 */
+	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
+	     !atomic_read(&sess->session_logout))
+		atomic_set(&sess->session_fall_back_to_erl0, 1);
+
+	/*
+	 * If this was not the last connection in the session, and we are
+	 * performing session reinstatement or falling back to ERL=0, call
+	 * iscsit_stop_session() without sleeping to shutdown the other
+	 * active connections.
+	 */
+	if (atomic_read(&sess->nconn)) {
+		if (!atomic_read(&sess->session_reinstatement) &&
+		    !atomic_read(&sess->session_fall_back_to_erl0)) {
+			spin_unlock_bh(&sess->conn_lock);
+			return 0;
+		}
+		if (!atomic_read(&sess->session_stop_active)) {
+			atomic_set(&sess->session_stop_active, 1);
+			spin_unlock_bh(&sess->conn_lock);
+			iscsit_stop_session(sess, 0, 0);
+			return 0;
+		}
+		spin_unlock_bh(&sess->conn_lock);
+		return 0;
+	}
+
+	/*
+	 * If this was the last connection in the session and one of the
+	 * following is occurring:
+	 *
+	 * Session Reinstatement is not being performed, and we are falling
+	 * back to ERL=0: call iscsit_close_session().
+	 *
+	 * Session Logout was requested.  iscsit_close_session() will be
+	 * called elsewhere.
+	 *
+	 * Session Continuation is not being performed, start the Time2Retain
+	 * handler and check if sleep_on_sess_wait_comp is active.
+	 */
+	if (!atomic_read(&sess->session_reinstatement) &&
+	     atomic_read(&sess->session_fall_back_to_erl0)) {
+		spin_unlock_bh(&sess->conn_lock);
+		target_put_session(sess->se_sess);
+
+		return 0;
+	} else if (atomic_read(&sess->session_logout)) {
+		pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+		sess->session_state = TARG_SESS_STATE_FREE;
+		spin_unlock_bh(&sess->conn_lock);
+
+		if (atomic_read(&sess->sleep_on_sess_wait_comp))
+			complete(&sess->session_wait_comp);
+
+		return 0;
+	} else {
+		pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+		sess->session_state = TARG_SESS_STATE_FAILED;
+
+		if (!atomic_read(&sess->session_continuation)) {
+			spin_unlock_bh(&sess->conn_lock);
+			iscsit_start_time2retain_handler(sess);
+		} else
+			spin_unlock_bh(&sess->conn_lock);
+
+		if (atomic_read(&sess->sleep_on_sess_wait_comp))
+			complete(&sess->session_wait_comp);
+
+		return 0;
+	}
+}
+
+int iscsit_close_session(struct iscsi_session *sess)
+{
+	struct iscsi_portal_group *tpg = sess->tpg;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+	if (atomic_read(&sess->nconn)) {
+		pr_err("%d connection(s) still exist for iSCSI session"
+			" to %s\n", atomic_read(&sess->nconn),
+			sess->sess_ops->InitiatorName);
+		BUG();
+	}
+
+	spin_lock_bh(&se_tpg->session_lock);
+	atomic_set(&sess->session_logout, 1);
+	atomic_set(&sess->session_reinstatement, 1);
+	iscsit_stop_time2retain_timer(sess);
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	/*
+	 * transport_deregister_session_configfs() will clear the
+	 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process
+	 * context can set it again with __transport_register_session() in
+	 * iscsi_post_login_handler() after iscsit_stop_session() completes
+	 * in iscsi_np context.
+	 */
+	transport_deregister_session_configfs(sess->se_sess);
+
+	/*
+	 * If any other processes are accessing this session pointer we must
+	 * wait until they have completed.  If we are in an interrupt (the
+	 * time2retain handler) and hold an active session usage count,
+	 * restart the timer and exit.
+	 */
+	if (!in_interrupt()) {
+		if (iscsit_check_session_usage_count(sess) == 1)
+			iscsit_stop_session(sess, 1, 1);
+	} else {
+		if (iscsit_check_session_usage_count(sess) == 2) {
+			atomic_set(&sess->session_logout, 0);
+			iscsit_start_time2retain_handler(sess);
+			return 0;
+		}
+	}
+
+	transport_deregister_session(sess->se_sess);
+
+	if (sess->sess_ops->ErrorRecoveryLevel == 2)
+		iscsit_free_connection_recovery_entires(sess);
+
+	iscsit_free_all_ooo_cmdsns(sess);
+
+	spin_lock_bh(&se_tpg->session_lock);
+	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+	sess->session_state = TARG_SESS_STATE_FREE;
+	pr_debug("Released iSCSI session from node: %s\n",
+			sess->sess_ops->InitiatorName);
+	tpg->nsessions--;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_nsessions--;
+
+	pr_debug("Decremented number of active iSCSI Sessions on"
+		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
+
+	spin_lock(&sess_idr_lock);
+	idr_remove(&sess_idr, sess->session_index);
+	spin_unlock(&sess_idr_lock);
+
+	kfree(sess->sess_ops);
+	sess->sess_ops = NULL;
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	kfree(sess);
+	return 0;
+}
+
+static void iscsit_logout_post_handler_closesession(
+	struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	int sleep = 1;
+	/*
+	 * Traditional iscsi/tcp will invoke this logic from TX thread
+	 * context during session logout, so clear tx_thread_active and
+	 * sleep if iscsit_close_connection() has not already occurred.
+	 *
+	 * Since iser-target invokes this logic from its own workqueue,
+	 * always sleep waiting for RX/TX thread shutdown to complete
+	 * within iscsit_close_connection().
+	 */
+	if (conn->conn_transport->transport_type == ISCSI_TCP) {
+		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+		if (!sleep)
+			return;
+	}
+
+	atomic_set(&conn->conn_logout_remove, 0);
+	complete(&conn->conn_logout_comp);
+
+	iscsit_dec_conn_usage_count(conn);
+	iscsit_stop_session(sess, sleep, sleep);
+	iscsit_dec_session_usage_count(sess);
+	target_put_session(sess->se_sess);
+}
+
+static void iscsit_logout_post_handler_samecid(
+	struct iscsi_conn *conn)
+{
+	int sleep = 1;
+
+	if (conn->conn_transport->transport_type == ISCSI_TCP) {
+		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+		if (!sleep)
+			return;
+	}
+
+	atomic_set(&conn->conn_logout_remove, 0);
+	complete(&conn->conn_logout_comp);
+
+	iscsit_cause_connection_reinstatement(conn, sleep);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+static void iscsit_logout_post_handler_diffcid(
+	struct iscsi_conn *conn,
+	u16 cid)
+{
+	struct iscsi_conn *l_conn;
+	struct iscsi_session *sess = conn->sess;
+	bool conn_found = false;
+
+	if (!sess)
+		return;
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
+		if (l_conn->cid == cid) {
+			iscsit_inc_conn_usage_count(l_conn);
+			conn_found = true;
+			break;
+		}
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	if (!conn_found)
+		return;
+
+	if (l_conn->sock)
+		l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
+
+	spin_lock_bh(&l_conn->state_lock);
+	pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+	l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+	spin_unlock_bh(&l_conn->state_lock);
+
+	iscsit_cause_connection_reinstatement(l_conn, 1);
+	iscsit_dec_conn_usage_count(l_conn);
+}
+
+/*
+ *	A return of 0 signals that the connection (or entire session) is being
+ *	shut down; iscsit_response_queue() converts it to -ECONNRESET so the
+ *	TX thread stops touching the connection.  A return of 1 lets the TX
+ *	thread continue.
+ */
+int iscsit_logout_post_handler(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	int ret = 0;
+
+	switch (cmd->logout_reason) {
+	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+		switch (cmd->logout_response) {
+		case ISCSI_LOGOUT_SUCCESS:
+		case ISCSI_LOGOUT_CLEANUP_FAILED:
+		default:
+			iscsit_logout_post_handler_closesession(conn);
+			break;
+		}
+		ret = 0;
+		break;
+	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+		if (conn->cid == cmd->logout_cid) {
+			switch (cmd->logout_response) {
+			case ISCSI_LOGOUT_SUCCESS:
+			case ISCSI_LOGOUT_CLEANUP_FAILED:
+			default:
+				iscsit_logout_post_handler_samecid(conn);
+				break;
+			}
+			ret = 0;
+		} else {
+			switch (cmd->logout_response) {
+			case ISCSI_LOGOUT_SUCCESS:
+				iscsit_logout_post_handler_diffcid(conn,
+					cmd->logout_cid);
+				break;
+			case ISCSI_LOGOUT_CID_NOT_FOUND:
+			case ISCSI_LOGOUT_CLEANUP_FAILED:
+			default:
+				break;
+			}
+			ret = 1;
+		}
+		break;
+	case ISCSI_LOGOUT_REASON_RECOVERY:
+		switch (cmd->logout_response) {
+		case ISCSI_LOGOUT_SUCCESS:
+		case ISCSI_LOGOUT_CID_NOT_FOUND:
+		case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
+		case ISCSI_LOGOUT_CLEANUP_FAILED:
+		default:
+			break;
+		}
+		ret = 1;
+		break;
+	default:
+		break;
+
+	}
+	return ret;
+}
+EXPORT_SYMBOL(iscsit_logout_post_handler);
+
+void iscsit_fail_session(struct iscsi_session *sess)
+{
+	struct iscsi_conn *conn;
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+		pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+		conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+	sess->session_state = TARG_SESS_STATE_FAILED;
+}
+
+int iscsit_free_session(struct iscsi_session *sess)
+{
+	u16 conn_count = atomic_read(&sess->nconn);
+	struct iscsi_conn *conn, *conn_tmp = NULL;
+	int is_last;
+
+	spin_lock_bh(&sess->conn_lock);
+	atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+	list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+			conn_list) {
+		if (conn_count == 0)
+			break;
+
+		if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+			is_last = 1;
+		} else {
+			iscsit_inc_conn_usage_count(conn_tmp);
+			is_last = 0;
+		}
+		iscsit_inc_conn_usage_count(conn);
+
+		spin_unlock_bh(&sess->conn_lock);
+		iscsit_cause_connection_reinstatement(conn, 1);
+		spin_lock_bh(&sess->conn_lock);
+
+		iscsit_dec_conn_usage_count(conn);
+		if (is_last == 0)
+			iscsit_dec_conn_usage_count(conn_tmp);
+
+		conn_count--;
+	}
+
+	if (atomic_read(&sess->nconn)) {
+		spin_unlock_bh(&sess->conn_lock);
+		wait_for_completion(&sess->session_wait_comp);
+	} else
+		spin_unlock_bh(&sess->conn_lock);
+
+	target_put_session(sess->se_sess);
+	return 0;
+}
+
+void iscsit_stop_session(
+	struct iscsi_session *sess,
+	int session_sleep,
+	int connection_sleep)
+{
+	u16 conn_count = atomic_read(&sess->nconn);
+	struct iscsi_conn *conn, *conn_tmp = NULL;
+	int is_last;
+
+	spin_lock_bh(&sess->conn_lock);
+	if (session_sleep)
+		atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+	if (connection_sleep) {
+		list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+				conn_list) {
+			if (conn_count == 0)
+				break;
+
+			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+				is_last = 1;
+			} else {
+				iscsit_inc_conn_usage_count(conn_tmp);
+				is_last = 0;
+			}
+			iscsit_inc_conn_usage_count(conn);
+
+			spin_unlock_bh(&sess->conn_lock);
+			iscsit_cause_connection_reinstatement(conn, 1);
+			spin_lock_bh(&sess->conn_lock);
+
+			iscsit_dec_conn_usage_count(conn);
+			if (is_last == 0)
+				iscsit_dec_conn_usage_count(conn_tmp);
+			conn_count--;
+		}
+	} else {
+		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+			iscsit_cause_connection_reinstatement(conn, 0);
+	}
+
+	if (session_sleep && atomic_read(&sess->nconn)) {
+		spin_unlock_bh(&sess->conn_lock);
+		wait_for_completion(&sess->session_wait_comp);
+	} else
+		spin_unlock_bh(&sess->conn_lock);
+}
+
+int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+{
+	struct iscsi_session *sess;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+	struct se_session *se_sess, *se_sess_tmp;
+	LIST_HEAD(free_list);
+	int session_count = 0;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	if (tpg->nsessions && !force) {
+		spin_unlock_bh(&se_tpg->session_lock);
+		return -1;
+	}
+
+	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+			sess_list) {
+		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+		spin_lock(&sess->conn_lock);
+		if (atomic_read(&sess->session_fall_back_to_erl0) ||
+		    atomic_read(&sess->session_logout) ||
+		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+			spin_unlock(&sess->conn_lock);
+			continue;
+		}
+		atomic_set(&sess->session_reinstatement, 1);
+		atomic_set(&sess->session_fall_back_to_erl0, 1);
+		spin_unlock(&sess->conn_lock);
+
+		list_move_tail(&se_sess->sess_list, &free_list);
+	}
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+		iscsit_free_session(sess);
+		session_count++;
+	}
+
+	pr_debug("Released %d iSCSI Session(s) from Target Portal"
+			" Group: %hu\n", session_count, tpg->tpgt);
+	return 0;
+}
+
+MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
+MODULE_VERSION("4.1.x");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iscsi_target_init_module);
+module_exit(iscsi_target_cleanup_module);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
new file mode 100644
index 0000000..4cf2c0f
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -0,0 +1,50 @@
+#ifndef ISCSI_TARGET_H
+#define ISCSI_TARGET_H
+
+extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
+extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
+extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
+extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
+extern void iscsit_del_tiqn(struct iscsi_tiqn *);
+extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern void iscsit_login_kref_put(struct kref *);
+extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
+				struct iscsi_tpg_np *);
+extern bool iscsit_check_np_match(struct sockaddr_storage *,
+				struct iscsi_np *, int);
+extern struct iscsi_np *iscsit_add_np(struct sockaddr_storage *,
+				int);
+extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
+				struct iscsi_portal_group *, bool);
+extern int iscsit_del_np(struct iscsi_np *);
+extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
+extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
+extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery);
+extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+extern int iscsit_close_connection(struct iscsi_conn *);
+extern int iscsit_close_session(struct iscsi_session *);
+extern void iscsit_fail_session(struct iscsi_session *);
+extern int iscsit_free_session(struct iscsi_session *);
+extern void iscsit_stop_session(struct iscsi_session *, int, int);
+extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
+
+extern struct iscsit_global *iscsit_global;
+extern const struct target_core_fabric_ops iscsi_ops;
+
+extern struct kmem_cache *lio_dr_cache;
+extern struct kmem_cache *lio_ooo_cache;
+extern struct kmem_cache *lio_qr_cache;
+extern struct kmem_cache *lio_r2t_cache;
+
+extern struct idr sess_idr;
+extern struct mutex auth_id_lock;
+extern spinlock_t sess_idr_lock;
+
+
+#endif   /*** ISCSI_TARGET_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
new file mode 100644
index 0000000..47e249d
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -0,0 +1,502 @@
+/*******************************************************************************
+ * This file houses the main functions for the iSCSI CHAP support
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_nego.h"
+#include "iscsi_target_auth.h"
+
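+/*
+ * Decode an ASCII hex string into binary, returning the number of bytes
+ * written to dst.
+ */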
+static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
+{
+	int j = DIV_ROUND_UP(len, 2), rc;
+
+	rc = hex2bin(dst, src, j);
+	if (rc < 0)
+		pr_debug("CHAP string contains non hex digit symbols\n");
+
+	dst[j] = '\0';
+	return j;
+}
+
+static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
+{
+	int i;
+
+	for (i = 0; i < src_len; i++) {
+		sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
+	}
+}
+
+static void chap_gen_challenge(
+	struct iscsi_conn *conn,
+	int caller,
+	char *c_str,
+	unsigned int *c_len)
+{
+	unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+	struct iscsi_chap *chap = conn->auth_protocol;
+
+	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+
+	get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
+	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+				CHAP_CHALLENGE_LENGTH);
+	/*
+	 * Set CHAP_C, and copy the generated challenge into c_str.  The
+	 * extra byte added to *c_len below accounts for the NUL separator
+	 * between login key=value pairs.
+	 */
+	*c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
+	*c_len += 1;
+
+	pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
+			challenge_asciihex);
+}
+
+static int chap_check_algorithm(const char *a_str)
+{
+	char *tmp, *orig, *token;
+
+	tmp = kstrdup(a_str, GFP_KERNEL);
+	if (!tmp) {
+		pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
+		return CHAP_DIGEST_UNKNOWN;
+	}
+	orig = tmp;
+
+	token = strsep(&tmp, "=");
+	if (!token)
+		goto out;
+
+	if (strcmp(token, "CHAP_A")) {
+		pr_err("Unable to locate CHAP_A key\n");
+		goto out;
+	}
+	while (token) {
+		token = strsep(&tmp, ",");
+		if (!token)
+			goto out;
+
+		if (!strncmp(token, "5", 1)) {
+			pr_debug("Selected MD5 Algorithm\n");
+			kfree(orig);
+			return CHAP_DIGEST_MD5;
+		}
+	}
+out:
+	kfree(orig);
+	return CHAP_DIGEST_UNKNOWN;
+}
+
+static struct iscsi_chap *chap_server_open(
+	struct iscsi_conn *conn,
+	struct iscsi_node_auth *auth,
+	const char *a_str,
+	char *aic_str,
+	unsigned int *aic_len)
+{
+	int ret;
+	struct iscsi_chap *chap;
+
+	if (!(auth->naf_flags & NAF_USERID_SET) ||
+	    !(auth->naf_flags & NAF_PASSWORD_SET)) {
+		pr_err("CHAP user or password not set for"
+				" Initiator ACL\n");
+		return NULL;
+	}
+
+	conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
+	if (!conn->auth_protocol)
+		return NULL;
+
+	chap = conn->auth_protocol;
+	ret = chap_check_algorithm(a_str);
+	switch (ret) {
+	case CHAP_DIGEST_MD5:
+		pr_debug("[server] Got CHAP_A=5\n");
+		/*
+		 * Send back CHAP_A set to MD5.
+		*/
+		*aic_len = sprintf(aic_str, "CHAP_A=5");
+		*aic_len += 1;
+		chap->digest_type = CHAP_DIGEST_MD5;
+		pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+		break;
+	case CHAP_DIGEST_UNKNOWN:
+	default:
+		pr_err("Unsupported CHAP_A value\n");
+		return NULL;
+	}
+
+	/*
+	 * Set Identifier.
+	 */
+	chap->id = conn->tpg->tpg_chap_id++;
+	*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
+	*aic_len += 1;
+	pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
+	/*
+	 * Generate Challenge.
+	 */
+	chap_gen_challenge(conn, 1, aic_str, aic_len);
+
+	return chap;
+}
+
+static void chap_close(struct iscsi_conn *conn)
+{
+	kfree(conn->auth_protocol);
+	conn->auth_protocol = NULL;
+}
+
+static int chap_server_compute_md5(
+	struct iscsi_conn *conn,
+	struct iscsi_node_auth *auth,
+	char *nr_in_ptr,
+	char *nr_out_ptr,
+	unsigned int *nr_out_len)
+{
+	unsigned long id;
+	unsigned char id_as_uchar;
+	unsigned char digest[MD5_SIGNATURE_SIZE];
+	unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
+	unsigned char identifier[10], *challenge = NULL;
+	unsigned char *challenge_binhex = NULL;
+	unsigned char client_digest[MD5_SIGNATURE_SIZE];
+	unsigned char server_digest[MD5_SIGNATURE_SIZE];
+	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+	size_t compare_len;
+	struct iscsi_chap *chap = conn->auth_protocol;
+	struct crypto_hash *tfm;
+	struct hash_desc desc;
+	struct scatterlist sg;
+	int auth_ret = -1, ret, challenge_len;
+
+	memset(identifier, 0, 10);
+	memset(chap_n, 0, MAX_CHAP_N_SIZE);
+	memset(chap_r, 0, MAX_RESPONSE_LENGTH);
+	memset(digest, 0, MD5_SIGNATURE_SIZE);
+	memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
+	memset(client_digest, 0, MD5_SIGNATURE_SIZE);
+	memset(server_digest, 0, MD5_SIGNATURE_SIZE);
+
+	challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+	if (!challenge) {
+		pr_err("Unable to allocate challenge buffer\n");
+		goto out;
+	}
+
+	challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+	if (!challenge_binhex) {
+		pr_err("Unable to allocate challenge_binhex buffer\n");
+		goto out;
+	}
+	/*
+	 * Extract CHAP_N.
+	 */
+	if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
+				&type) < 0) {
+		pr_err("Could not find CHAP_N.\n");
+		goto out;
+	}
+	if (type == HEX) {
+		pr_err("CHAP_N value is unexpectedly HEX encoded.\n");
+		goto out;
+	}
+
+	/* Include the terminating NULL in the compare */
+	compare_len = strlen(auth->userid) + 1;
+	if (strncmp(chap_n, auth->userid, compare_len) != 0) {
+		pr_err("CHAP_N values do not match!\n");
+		goto out;
+	}
+	pr_debug("[server] Got CHAP_N=%s\n", chap_n);
+	/*
+	 * Extract CHAP_R.
+	 */
+	if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
+				&type) < 0) {
+		pr_err("Could not find CHAP_R.\n");
+		goto out;
+	}
+	if (type != HEX) {
+		pr_err("CHAP_R value is not HEX encoded.\n");
+		goto out;
+	}
+
+	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
+	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
+
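+	/*
+	 * Per RFC 1994 the expected response is MD5(id || secret ||
+	 * challenge).  Feed those three pieces to the hash in order, then
+	 * compare the result against the client_digest decoded from CHAP_R.
+	 */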
+	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("Unable to allocate struct crypto_hash\n");
+		goto out;
+	}
+	desc.tfm = tfm;
+	desc.flags = 0;
+
+	ret = crypto_hash_init(&desc);
+	if (ret < 0) {
+		pr_err("crypto_hash_init() failed\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	sg_init_one(&sg, &chap->id, 1);
+	ret = crypto_hash_update(&desc, &sg, 1);
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for id\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	sg_init_one(&sg, &auth->password, strlen(auth->password));
+	ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for password\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
+	ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for challenge\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	ret = crypto_hash_final(&desc, server_digest);
+	if (ret < 0) {
+		pr_err("crypto_hash_final() failed for server digest\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+	crypto_free_hash(tfm);
+
+	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+	pr_debug("[server] MD5 Server Digest: %s\n", response);
+
+	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
+		pr_debug("[server] MD5 Digests do not match!\n\n");
+		goto out;
+	} else
+		pr_debug("[server] MD5 Digests match, CHAP connetication"
+				" successful.\n\n");
+	/*
+	 * One way authentication has succeeded, return now if mutual
+	 * authentication is not enabled.
+	 */
+	if (!auth->authenticate_target) {
+		kfree(challenge);
+		kfree(challenge_binhex);
+		return 0;
+	}
+	/*
+	 * Get CHAP_I.
+	 */
+	if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+		pr_err("Could not find CHAP_I.\n");
+		goto out;
+	}
+
+	if (type == HEX)
+		ret = kstrtoul(&identifier[2], 0, &id);
+	else
+		ret = kstrtoul(identifier, 0, &id);
+
+	if (ret < 0) {
+		pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
+		goto out;
+	}
+	if (id > 255) {
+		pr_err("chap identifier: %lu greater than 255\n", id);
+		goto out;
+	}
+	/*
+	 * RFC 1994 says the Identifier is exactly one octet (8 bits),
+	 * hence the check against 255 above.
+	 */
+	pr_debug("[server] Got CHAP_I=%lu\n", id);
+	/*
+	 * Get CHAP_C.
+	 */
+	if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
+			challenge, &type) < 0) {
+		pr_err("Could not find CHAP_C.\n");
+		goto out;
+	}
+
+	if (type != HEX) {
+		pr_err("CHAP_C value is not HEX encoded.\n");
+		goto out;
+	}
+	pr_debug("[server] Got CHAP_C=%s\n", challenge);
+	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
+				strlen(challenge));
+	if (!challenge_len) {
+		pr_err("Unable to convert incoming challenge\n");
+		goto out;
+	}
+	if (challenge_len > 1024) {
+		pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+		goto out;
+	}
+	/*
+	 * During mutual authentication, the CHAP_C generated by the
+	 * initiator must not match the original CHAP_C generated by
+	 * the target.
+	 */
+	if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+		pr_err("initiator CHAP_C matches target CHAP_C, failing"
+		       " login attempt\n");
+		goto out;
+	}
+	/*
+	 * Generate CHAP_N and CHAP_R for mutual authentication.
+	 */
+	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("Unable to allocate struct crypto_hash\n");
+		goto out;
+	}
+	desc.tfm = tfm;
+	desc.flags = 0;
+
+	ret = crypto_hash_init(&desc);
+	if (ret < 0) {
+		pr_err("crypto_hash_init() failed\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	/*
+	 * Hash only the low byte of the identifier; hashing the full
+	 * unsigned long would make the digest depend on host endianness
+	 * and word size.
+	 */
+	id_as_uchar = id;
+	sg_init_one(&sg, &id_as_uchar, 1);
+	ret = crypto_hash_update(&desc, &sg, 1);
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for id\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	sg_init_one(&sg, auth->password_mutual,
+				strlen(auth->password_mutual));
+	ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for"
+				" password_mutual\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+	/*
+	 * Hash the initiator's challenge, converted from ASCII hex to
+	 * binary above.
+	 */
+	sg_init_one(&sg, challenge_binhex, challenge_len);
+	ret = crypto_hash_update(&desc, &sg, challenge_len);
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for ma challenge\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	ret = crypto_hash_final(&desc, digest);
+	if (ret < 0) {
+		pr_err("crypto_hash_final() failed for ma digest\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+	crypto_free_hash(tfm);
+	/*
+	 * Generate CHAP_N and CHAP_R.
+	 */
+	*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
+	*nr_out_len += 1;
+	pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
+	/*
+	 * Convert the response digest from binary to ASCII hex.
+	 */
+	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
+			response);
+	*nr_out_len += 1;
+	pr_debug("[server] Sending CHAP_R=0x%s\n", response);
+	auth_ret = 0;
+out:
+	kfree(challenge);
+	kfree(challenge_binhex);
+	return auth_ret;
+}
+
+static int chap_got_response(
+	struct iscsi_conn *conn,
+	struct iscsi_node_auth *auth,
+	char *nr_in_ptr,
+	char *nr_out_ptr,
+	unsigned int *nr_out_len)
+{
+	struct iscsi_chap *chap = conn->auth_protocol;
+
+	switch (chap->digest_type) {
+	case CHAP_DIGEST_MD5:
+		if (chap_server_compute_md5(conn, auth, nr_in_ptr,
+				nr_out_ptr, nr_out_len) < 0)
+			return -1;
+		return 0;
+	default:
+		pr_err("Unknown CHAP digest type %d!\n",
+				chap->digest_type);
+		return -1;
+	}
+}
+
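+/*
+ * Main CHAP state machine, driven by the login negotiation code.  Returns
+ * 0 while the exchange is still in progress (challenge sent), 1 once
+ * authentication has completed successfully, and 2 on failure.
+ */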
+u32 chap_main_loop(
+	struct iscsi_conn *conn,
+	struct iscsi_node_auth *auth,
+	char *in_text,
+	char *out_text,
+	int *in_len,
+	int *out_len)
+{
+	struct iscsi_chap *chap = conn->auth_protocol;
+
+	if (!chap) {
+		chap = chap_server_open(conn, auth, in_text, out_text, out_len);
+		if (!chap)
+			return 2;
+		chap->chap_state = CHAP_STAGE_SERVER_AIC;
+		return 0;
+	} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
+		convert_null_to_semi(in_text, *in_len);
+		if (chap_got_response(conn, auth, in_text, out_text,
+				out_len) < 0) {
+			chap_close(conn);
+			return 2;
+		}
+		if (auth->authenticate_target)
+			chap->chap_state = CHAP_STAGE_SERVER_NR;
+		else
+			*out_len = 0;
+		chap_close(conn);
+		return 1;
+	}
+
+	return 2;
+}
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
new file mode 100644
index 0000000..d22f7b9
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -0,0 +1,32 @@
+#ifndef _ISCSI_CHAP_H_
+#define _ISCSI_CHAP_H_
+
+#define CHAP_DIGEST_UNKNOWN	0
+#define CHAP_DIGEST_MD5		5
+#define CHAP_DIGEST_SHA		6
+
+#define CHAP_CHALLENGE_LENGTH	16
+#define CHAP_CHALLENGE_STR_LEN	4096
+#define MAX_RESPONSE_LENGTH	64	/* sufficient for MD5 */
+#define	MAX_CHAP_N_SIZE		512
+
+#define MD5_SIGNATURE_SIZE	16	/* 16 bytes in a MD5 message digest */
+
+#define CHAP_STAGE_CLIENT_A	1
+#define CHAP_STAGE_SERVER_AIC	2
+#define CHAP_STAGE_CLIENT_NR	3
+#define CHAP_STAGE_CLIENT_NRIC	4
+#define CHAP_STAGE_SERVER_NR	5
+
+extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
+				int *, int *);
+
+struct iscsi_chap {
+	unsigned char	digest_type;
+	unsigned char	id;
+	unsigned char	challenge[CHAP_CHALLENGE_LENGTH];
+	unsigned int	authenticate_target;
+	unsigned int	chap_state;
+} ____cacheline_aligned;
+
+#endif   /*** _ISCSI_CHAP_H_ ***/
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
new file mode 100644
index 0000000..634ad36
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -0,0 +1,1717 @@
+/*******************************************************************************
+ * This file contains the configfs implementation for iSCSI Target mode
+ * from the LIO-Target Project.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/export.h>
+#include <linux/inet.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include <target/iscsi/iscsi_target_stat.h>
+
+
+/* Start items for lio_target_portal_cit */
+
+static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item)
+{
+	return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np);
+}
+
+static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page)
+{
+	struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
+	struct iscsi_tpg_np *tpg_np_sctp;
+	ssize_t rb;
+
+	tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+	if (tpg_np_sctp)
+		rb = sprintf(page, "1\n");
+	else
+		rb = sprintf(page, "0\n");
+
+	return rb;
+}
+
+static ssize_t lio_target_np_sctp_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
+	struct iscsi_np *np;
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np_sctp = NULL;
+	u32 op;
+	int ret;
+
+	ret = kstrtou32(page, 0, &op);
+	if (ret)
+		return ret;
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %u\n", op);
+		return -EINVAL;
+	}
+	np = tpg_np->tpg_np;
+	if (!np) {
+		pr_err("Unable to locate struct iscsi_np from"
+				" struct iscsi_tpg_np\n");
+		return -EINVAL;
+	}
+
+	tpg = tpg_np->tpg;
+	if (iscsit_get_tpg(tpg) < 0)
+		return -EINVAL;
+
+	if (op) {
+		/*
+		 * Use existing np->np_sockaddr for SCTP network portal reference
+		 */
+		tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+					tpg_np, ISCSI_SCTP_TCP);
+		if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
+			goto out;
+	} else {
+		tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+		if (!tpg_np_sctp)
+			goto out;
+
+		ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
+		if (ret < 0)
+			goto out;
+	}
+
+	iscsit_put_tpg(tpg);
+	return count;
+out:
+	iscsit_put_tpg(tpg);
+	return -EINVAL;
+}
+
+static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
+{
+	struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
+	struct iscsi_tpg_np *tpg_np_iser;
+	ssize_t rb;
+
+	tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
+	if (tpg_np_iser)
+		rb = sprintf(page, "1\n");
+	else
+		rb = sprintf(page, "0\n");
+
+	return rb;
+}
+
+static ssize_t lio_target_np_iser_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
+	struct iscsi_np *np;
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np_iser = NULL;
+	u32 op;
+	int rc = 0;
+
+	rc = kstrtou32(page, 0, &op);
+	if (rc)
+		return rc;
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %u\n", op);
+		return -EINVAL;
+	}
+	np = tpg_np->tpg_np;
+	if (!np) {
+		pr_err("Unable to locate struct iscsi_np from"
+				" struct iscsi_tpg_np\n");
+		return -EINVAL;
+	}
+
+	tpg = tpg_np->tpg;
+	if (iscsit_get_tpg(tpg) < 0)
+		return -EINVAL;
+
+	if (op) {
+		rc = request_module("ib_isert");
+		if (rc != 0) {
+			pr_warn("Unable to request_module for ib_isert\n");
+			rc = 0;
+		}
+
+		tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+				tpg_np, ISCSI_INFINIBAND);
+		if (IS_ERR(tpg_np_iser)) {
+			rc = PTR_ERR(tpg_np_iser);
+			goto out;
+		}
+	} else {
+		tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
+		if (tpg_np_iser) {
+			rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+			if (rc < 0)
+				goto out;
+		}
+	}
+
+	iscsit_put_tpg(tpg);
+	return count;
+out:
+	iscsit_put_tpg(tpg);
+	return rc;
+}
+
+CONFIGFS_ATTR(lio_target_np_, sctp);
+CONFIGFS_ATTR(lio_target_np_, iser);
+
+static struct configfs_attribute *lio_target_portal_attrs[] = {
+	&lio_target_np_attr_sctp,
+	&lio_target_np_attr_iser,
+	NULL,
+};
+
+/* Stop items for lio_target_portal_cit */
+
+/* Start items for lio_target_np_cit */
+
+#define MAX_PORTAL_LEN		256
+
+static struct se_tpg_np *lio_target_call_addnptotpg(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np;
+	char *str, *str2, *ip_str, *port_str;
+	struct sockaddr_storage sockaddr;
+	struct sockaddr_in *sock_in;
+	struct sockaddr_in6 *sock_in6;
+	unsigned long port;
+	int ret;
+	char buf[MAX_PORTAL_LEN + 1];
+
+	if (strlen(name) > MAX_PORTAL_LEN) {
+		pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
+			(int)strlen(name), MAX_PORTAL_LEN);
+		return ERR_PTR(-EOVERFLOW);
+	}
+	memset(buf, 0, MAX_PORTAL_LEN + 1);
+	snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
+
+	memset(&sockaddr, 0, sizeof(struct sockaddr_storage));
+
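+	/*
+	 * The portal name is "addr:port" for IPv4 or "[addr]:port" for
+	 * IPv6, e.g. 10.1.1.1:3260 or [2001:db8::1]:3260.
+	 */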
+	str = strstr(buf, "[");
+	if (str) {
+		const char *end;
+
+		str2 = strstr(str, "]");
+		if (!str2) {
+			pr_err("Unable to locate trailing \"]\""
+				" in IPv6 iSCSI network portal address\n");
+			return ERR_PTR(-EINVAL);
+		}
+		str++; /* Skip over leading "[" */
+		*str2 = '\0'; /* Terminate the unbracketed IPv6 address */
+		str2++; /* Skip over the \0 */
+		port_str = strstr(str2, ":");
+		if (!port_str) {
+			pr_err("Unable to locate \":port\""
+				" in IPv6 iSCSI network portal address\n");
+			return ERR_PTR(-EINVAL);
+		}
+		*port_str = '\0'; /* Terminate string for IP */
+		port_str++; /* Skip over ":" */
+
+		ret = kstrtoul(port_str, 0, &port);
+		if (ret < 0) {
+			pr_err("kstrtoul() failed for port_str: %d\n", ret);
+			return ERR_PTR(ret);
+		}
+		sock_in6 = (struct sockaddr_in6 *)&sockaddr;
+		sock_in6->sin6_family = AF_INET6;
+		sock_in6->sin6_port = htons((unsigned short)port);
+		ret = in6_pton(str, -1,
+				(void *)&sock_in6->sin6_addr.in6_u, -1, &end);
+		if (ret <= 0) {
+			pr_err("in6_pton returned: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	} else {
+		str = ip_str = &buf[0];
+		port_str = strstr(ip_str, ":");
+		if (!port_str) {
+			pr_err("Unable to locate \":port\""
+				" in IPv4 iSCSI network portal address\n");
+			return ERR_PTR(-EINVAL);
+		}
+		*port_str = '\0'; /* Terminate string for IP */
+		port_str++; /* Skip over ":" */
+
+		ret = kstrtoul(port_str, 0, &port);
+		if (ret < 0) {
+			pr_err("kstrtoul() failed for port_str: %d\n", ret);
+			return ERR_PTR(ret);
+		}
+		sock_in = (struct sockaddr_in *)&sockaddr;
+		sock_in->sin_family = AF_INET;
+		sock_in->sin_port = htons((unsigned short)port);
+		sock_in->sin_addr.s_addr = in_aton(ip_str);
+	}
+	tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+	ret = iscsit_get_tpg(tpg);
+	if (ret < 0)
+		return ERR_PTR(-EINVAL);
+
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
+		" PORTAL: %s\n",
+		config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+		tpg->tpgt, name);
+	/*
+	 * Assume ISCSI_TCP by default.  Other network portals for other
+	 * iSCSI fabrics:
+	 *
+	 * Traditional iSCSI over SCTP (initial support)
+	 * iSER/TCP (TODO, hardware available)
+	 * iSER/SCTP (TODO, software emulation with osc-iwarp)
+	 * iSER/IB (TODO, hardware available)
+	 *
+	 * can be enabled with attributes under
+	 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
+	 *
+	 */
+	tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
+				ISCSI_TCP);
+	if (IS_ERR(tpg_np)) {
+		iscsit_put_tpg(tpg);
+		return ERR_CAST(tpg_np);
+	}
+	pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
+
+	iscsit_put_tpg(tpg);
+	return &tpg_np->se_tpg_np;
+}
+
+static void lio_target_call_delnpfromtpg(
+	struct se_tpg_np *se_tpg_np)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np;
+	struct se_portal_group *se_tpg;
+	int ret;
+
+	tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
+	tpg = tpg_np->tpg;
+	ret = iscsit_get_tpg(tpg);
+	if (ret < 0)
+		return;
+
+	se_tpg = &tpg->tpg_se_tpg;
+	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
+		" PORTAL: %pISpc\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+		tpg->tpgt, &tpg_np->tpg_np->np_sockaddr);
+
+	ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
+	if (ret < 0)
+		goto out;
+
+	pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
+out:
+	iscsit_put_tpg(tpg);
+}
+
+/* End items for lio_target_np_cit */
+
+/* Start items for lio_target_nacl_attrib_cit */
+
+#define ISCSI_NACL_ATTR(name)						\
+static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\
+		char *page)						\
+{									\
+	struct se_node_acl *se_nacl = attrib_to_nacl(item);		\
+	struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+					se_node_acl);			\
+									\
+	return sprintf(page, "%u\n", nacl->node_attrib.name);		\
+}									\
+									\
+static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\
+		const char *page, size_t count)				\
+{									\
+	struct se_node_acl *se_nacl = attrib_to_nacl(item);		\
+	struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+					se_node_acl);			\
+	u32 val;							\
+	int ret;							\
+									\
+	ret = kstrtou32(page, 0, &val);					\
+	if (ret)							\
+		return ret;						\
+	ret = iscsit_na_##name(nacl, val);				\
+	if (ret < 0)							\
+		return ret;						\
+									\
+	return count;							\
+}									\
+									\
+CONFIGFS_ATTR(iscsi_nacl_attrib_, name)
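+
+/*
+ * For example, ISCSI_NACL_ATTR(dataout_timeout) expands to a show method
+ * reading nacl->node_attrib.dataout_timeout, a store method calling the
+ * iscsit_na_dataout_timeout() setter, and (via CONFIGFS_ATTR) the
+ * iscsi_nacl_attrib_attr_dataout_timeout attribute used in the table below.
+ */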
+
+ISCSI_NACL_ATTR(dataout_timeout);
+ISCSI_NACL_ATTR(dataout_timeout_retries);
+ISCSI_NACL_ATTR(default_erl);
+ISCSI_NACL_ATTR(nopin_timeout);
+ISCSI_NACL_ATTR(nopin_response_timeout);
+ISCSI_NACL_ATTR(random_datain_pdu_offsets);
+ISCSI_NACL_ATTR(random_datain_seq_offsets);
+ISCSI_NACL_ATTR(random_r2t_offsets);
+
+static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
+	&iscsi_nacl_attrib_attr_dataout_timeout,
+	&iscsi_nacl_attrib_attr_dataout_timeout_retries,
+	&iscsi_nacl_attrib_attr_default_erl,
+	&iscsi_nacl_attrib_attr_nopin_timeout,
+	&iscsi_nacl_attrib_attr_nopin_response_timeout,
+	&iscsi_nacl_attrib_attr_random_datain_pdu_offsets,
+	&iscsi_nacl_attrib_attr_random_datain_seq_offsets,
+	&iscsi_nacl_attrib_attr_random_r2t_offsets,
+	NULL,
+};
+
+/* End items for lio_target_nacl_attrib_cit */
+
+/* Start items for lio_target_nacl_auth_cit */
+
+#define __DEF_NACL_AUTH_STR(prefix, name, flags)			\
+static ssize_t __iscsi_##prefix##_##name##_show(			\
+	struct iscsi_node_acl *nacl,					\
+	char *page)							\
+{									\
+	struct iscsi_node_auth *auth = &nacl->node_auth;		\
+									\
+	if (!capable(CAP_SYS_ADMIN))					\
+		return -EPERM;						\
+	return snprintf(page, PAGE_SIZE, "%s\n", auth->name);		\
+}									\
+									\
+static ssize_t __iscsi_##prefix##_##name##_store(			\
+	struct iscsi_node_acl *nacl,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct iscsi_node_auth *auth = &nacl->node_auth;		\
+									\
+	if (!capable(CAP_SYS_ADMIN))					\
+		return -EPERM;						\
+	if (count >= sizeof(auth->name))				\
+		return -EINVAL;						\
+	snprintf(auth->name, sizeof(auth->name), "%s", page);		\
+	if (!strncmp("NULL", auth->name, 4))				\
+		auth->naf_flags &= ~flags;				\
+	else								\
+		auth->naf_flags |= flags;				\
+									\
+	if ((auth->naf_flags & NAF_USERID_IN_SET) &&			\
+	    (auth->naf_flags & NAF_PASSWORD_IN_SET))			\
+		auth->authenticate_target = 1;				\
+	else								\
+		auth->authenticate_target = 0;				\
+									\
+	return count;							\
+}
+
+#define DEF_NACL_AUTH_STR(name, flags)					\
+	__DEF_NACL_AUTH_STR(nacl_auth, name, flags)			\
+static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item,	\
+		char *page)						\
+{									\
+	struct se_node_acl *nacl = auth_to_nacl(item);			\
+	return __iscsi_nacl_auth_##name##_show(container_of(nacl,	\
+			struct iscsi_node_acl, se_node_acl), page);	\
+}									\
+static ssize_t iscsi_nacl_auth_##name##_store(struct config_item *item,	\
+		const char *page, size_t count)				\
+{									\
+	struct se_node_acl *nacl = auth_to_nacl(item);			\
+	return __iscsi_nacl_auth_##name##_store(container_of(nacl,	\
+			struct iscsi_node_acl, se_node_acl), page, count); \
+}									\
+									\
+CONFIGFS_ATTR(iscsi_nacl_auth_, name)
+
+/*
+ * One-way and mutual CHAP authentication credentials
+ */
+DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
+DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
+DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+
+#define __DEF_NACL_AUTH_INT(prefix, name)				\
+static ssize_t __iscsi_##prefix##_##name##_show(				\
+	struct iscsi_node_acl *nacl,					\
+	char *page)							\
+{									\
+	struct iscsi_node_auth *auth = &nacl->node_auth;		\
+									\
+	if (!capable(CAP_SYS_ADMIN))					\
+		return -EPERM;						\
+									\
+	return snprintf(page, PAGE_SIZE, "%d\n", auth->name);		\
+}
+
+#define DEF_NACL_AUTH_INT(name)						\
+	__DEF_NACL_AUTH_INT(nacl_auth, name)				\
+static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item,	\
+		char *page)						\
+{									\
+	struct se_node_acl *nacl = auth_to_nacl(item);			\
+	return __iscsi_nacl_auth_##name##_show(container_of(nacl,	\
+			struct iscsi_node_acl, se_node_acl), page);	\
+}									\
+									\
+CONFIGFS_ATTR_RO(iscsi_nacl_auth_, name)
+
+DEF_NACL_AUTH_INT(authenticate_target);
+
+static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
+	&iscsi_nacl_auth_attr_userid,
+	&iscsi_nacl_auth_attr_password,
+	&iscsi_nacl_auth_attr_authenticate_target,
+	&iscsi_nacl_auth_attr_userid_mutual,
+	&iscsi_nacl_auth_attr_password_mutual,
+	NULL,
+};
+
+/* End items for lio_target_nacl_auth_cit */
+
+/* Start items for lio_target_nacl_param_cit */
+
+#define ISCSI_NACL_PARAM(name)						\
+static ssize_t iscsi_nacl_param_##name##_show(struct config_item *item,	\
+		char *page)						\
+{									\
+	struct se_node_acl *se_nacl = param_to_nacl(item);		\
+	struct iscsi_session *sess;					\
+	struct se_session *se_sess;					\
+	ssize_t rb;							\
+									\
+	spin_lock_bh(&se_nacl->nacl_sess_lock);				\
+	se_sess = se_nacl->nacl_sess;					\
+	if (!se_sess) {							\
+		rb = snprintf(page, PAGE_SIZE,				\
+			"No Active iSCSI Session\n");			\
+	} else {							\
+		sess = se_sess->fabric_sess_ptr;			\
+		rb = snprintf(page, PAGE_SIZE, "%u\n",			\
+			(u32)sess->sess_ops->name);			\
+	}								\
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);			\
+									\
+	return rb;							\
+}									\
+									\
+CONFIGFS_ATTR_RO(iscsi_nacl_param_, name)
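+
+/*
+ * These report the value negotiated for the currently active session, if
+ * any, read under nacl_sess_lock; with no bound session they return the
+ * "No Active iSCSI Session" string instead.
+ */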
+
+ISCSI_NACL_PARAM(MaxConnections);
+ISCSI_NACL_PARAM(InitialR2T);
+ISCSI_NACL_PARAM(ImmediateData);
+ISCSI_NACL_PARAM(MaxBurstLength);
+ISCSI_NACL_PARAM(FirstBurstLength);
+ISCSI_NACL_PARAM(DefaultTime2Wait);
+ISCSI_NACL_PARAM(DefaultTime2Retain);
+ISCSI_NACL_PARAM(MaxOutstandingR2T);
+ISCSI_NACL_PARAM(DataPDUInOrder);
+ISCSI_NACL_PARAM(DataSequenceInOrder);
+ISCSI_NACL_PARAM(ErrorRecoveryLevel);
+
+static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
+	&iscsi_nacl_param_attr_MaxConnections,
+	&iscsi_nacl_param_attr_InitialR2T,
+	&iscsi_nacl_param_attr_ImmediateData,
+	&iscsi_nacl_param_attr_MaxBurstLength,
+	&iscsi_nacl_param_attr_FirstBurstLength,
+	&iscsi_nacl_param_attr_DefaultTime2Wait,
+	&iscsi_nacl_param_attr_DefaultTime2Retain,
+	&iscsi_nacl_param_attr_MaxOutstandingR2T,
+	&iscsi_nacl_param_attr_DataPDUInOrder,
+	&iscsi_nacl_param_attr_DataSequenceInOrder,
+	&iscsi_nacl_param_attr_ErrorRecoveryLevel,
+	NULL,
+};
+
+/* End items for lio_target_nacl_param_cit */
+
+/* Start items for lio_target_acl_cit */
+
+static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
+{
+	struct se_node_acl *se_nacl = acl_to_nacl(item);
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+	struct se_session *se_sess;
+	ssize_t rb = 0;
+	u32 max_cmd_sn;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (!se_sess) {
+		rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+			" Endpoint: %s\n", se_nacl->initiatorname);
+	} else {
+		sess = se_sess->fabric_sess_ptr;
+
+		rb += sprintf(page+rb, "InitiatorName: %s\n",
+			sess->sess_ops->InitiatorName);
+		rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+			sess->sess_ops->InitiatorAlias);
+
+		rb += sprintf(page+rb,
+			      "LIO Session ID: %u   ISID: 0x%6ph  TSIH: %hu  ",
+			      sess->sid, sess->isid, sess->tsih);
+		rb += sprintf(page+rb, "SessionType: %s\n",
+				(sess->sess_ops->SessionType) ?
+				"Discovery" : "Normal");
+		rb += sprintf(page+rb, "Session State: ");
+		switch (sess->session_state) {
+		case TARG_SESS_STATE_FREE:
+			rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+			break;
+		case TARG_SESS_STATE_ACTIVE:
+			rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+			break;
+		case TARG_SESS_STATE_LOGGED_IN:
+			rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+			break;
+		case TARG_SESS_STATE_FAILED:
+			rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+			break;
+		case TARG_SESS_STATE_IN_CONTINUE:
+			rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+			break;
+		default:
+			rb += sprintf(page+rb, "ERROR: Unknown Session"
+					" State!\n");
+			break;
+		}
+
+		rb += sprintf(page+rb, "---------------------[iSCSI Session"
+				" Values]-----------------------\n");
+		rb += sprintf(page+rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
+				"  :  MaxCmdSN  :     ITT    :     TTT\n");
+		max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
+		rb += sprintf(page+rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
+				"   0x%08x   0x%08x\n",
+			sess->cmdsn_window,
+			(max_cmd_sn - sess->exp_cmd_sn) + 1,
+			sess->exp_cmd_sn, max_cmd_sn,
+			sess->init_task_tag, sess->targ_xfer_tag);
+		rb += sprintf(page+rb, "----------------------[iSCSI"
+				" Connections]-------------------------\n");
+
+		spin_lock(&sess->conn_lock);
+		list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+			rb += sprintf(page+rb, "CID: %hu  Connection"
+					" State: ", conn->cid);
+			switch (conn->conn_state) {
+			case TARG_CONN_STATE_FREE:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_FREE\n");
+				break;
+			case TARG_CONN_STATE_XPT_UP:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_XPT_UP\n");
+				break;
+			case TARG_CONN_STATE_IN_LOGIN:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_IN_LOGIN\n");
+				break;
+			case TARG_CONN_STATE_LOGGED_IN:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_LOGGED_IN\n");
+				break;
+			case TARG_CONN_STATE_IN_LOGOUT:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_IN_LOGOUT\n");
+				break;
+			case TARG_CONN_STATE_LOGOUT_REQUESTED:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
+				break;
+			case TARG_CONN_STATE_CLEANUP_WAIT:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_CLEANUP_WAIT\n");
+				break;
+			default:
+				rb += sprintf(page+rb,
+					"ERROR: Unknown Connection State!\n");
+				break;
+			}
+
+			rb += sprintf(page+rb, "   Address %pISc %s", &conn->login_sockaddr,
+				(conn->network_transport == ISCSI_TCP) ?
+				"TCP" : "SCTP");
+			rb += sprintf(page+rb, "  StatSN: 0x%08x\n",
+				conn->stat_sn);
+		}
+		spin_unlock(&sess->conn_lock);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return rb;
+}
+
+static ssize_t lio_target_nacl_cmdsn_depth_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "%u\n", acl_to_nacl(item)->queue_depth);
+}
+
+static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_node_acl *se_nacl = acl_to_nacl(item);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+	struct config_item *acl_ci, *tpg_ci, *wwn_ci;
+	u32 cmdsn_depth = 0;
+	int ret;
+
+	ret = kstrtou32(page, 0, &cmdsn_depth);
+	if (ret)
+		return ret;
+	if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+		pr_err("Passed cmdsn_depth: %u exceeds"
+			" TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
+			TA_DEFAULT_CMDSN_DEPTH_MAX);
+		return -EINVAL;
+	}
+	acl_ci = &se_nacl->acl_group.cg_item;
+	if (!acl_ci) {
+		pr_err("Unable to locatel acl_ci\n");
+		return -EINVAL;
+	}
+	tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
+	if (!tpg_ci) {
+		pr_err("Unable to locate tpg_ci\n");
+		return -EINVAL;
+	}
+	wwn_ci = &tpg_ci->ci_group->cg_item;
+	if (!wwn_ci) {
+		pr_err("Unable to locate config_item wwn_ci\n");
+		return -EINVAL;
+	}
+
+	if (iscsit_get_tpg(tpg) < 0)
+		return -EINVAL;
+
+	ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
+
+	pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
+		"InitiatorName: %s\n", config_item_name(wwn_ci),
+		config_item_name(tpg_ci), cmdsn_depth,
+		config_item_name(acl_ci));
+
+	iscsit_put_tpg(tpg);
+	return (!ret) ? count : (ssize_t)ret;
+}
+
+static ssize_t lio_target_nacl_tag_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
+}
+
+static ssize_t lio_target_nacl_tag_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_node_acl *se_nacl = acl_to_nacl(item);
+	int ret;
+
+	ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+
+	if (ret < 0)
+		return ret;
+	return count;
+}
+
+CONFIGFS_ATTR_RO(lio_target_nacl_, info);
+CONFIGFS_ATTR(lio_target_nacl_, cmdsn_depth);
+CONFIGFS_ATTR(lio_target_nacl_, tag);
+
+static struct configfs_attribute *lio_target_initiator_attrs[] = {
+	&lio_target_nacl_attr_info,
+	&lio_target_nacl_attr_cmdsn_depth,
+	&lio_target_nacl_attr_tag,
+	NULL,
+};
+
+static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
+		const char *name)
+{
+	struct iscsi_node_acl *acl =
+		container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+	struct config_group *stats_cg = &se_nacl->acl_fabric_stat_group;
+
+	stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!stats_cg->default_groups) {
+		pr_err("Unable to allocate memory for"
+				" stats_cg->default_groups\n");
+		return -ENOMEM;
+	}
+
+	stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
+	stats_cg->default_groups[1] = NULL;
+	config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
+			"iscsi_sess_stats", &iscsi_stat_sess_cit);
+
+	return 0;
+}
+
+static void lio_target_cleanup_nodeacl(struct se_node_acl *se_nacl)
+{
+	struct iscsi_node_acl *acl = container_of(se_nacl,
+			struct iscsi_node_acl, se_node_acl);
+	struct config_item *df_item;
+	struct config_group *stats_cg;
+	int i;
+
+	stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+	for (i = 0; stats_cg->default_groups[i]; i++) {
+		df_item = &stats_cg->default_groups[i]->cg_item;
+		stats_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(stats_cg->default_groups);
+}
+
+/* End items for lio_target_acl_cit */
+
+/* Start items for lio_target_tpg_attrib_cit */
+
+#define DEF_TPG_ATTRIB(name)						\
+									\
+static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item,	\
+		char *page)						\
+{									\
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);		\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);	\
+	ssize_t rb;							\
+									\
+	if (iscsit_get_tpg(tpg) < 0)					\
+		return -EINVAL;						\
+									\
+	rb = sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
+	iscsit_put_tpg(tpg);						\
+	return rb;							\
+}									\
+									\
+static ssize_t iscsi_tpg_attrib_##name##_store(struct config_item *item,\
+		const char *page, size_t count)				\
+{									\
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);		\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);	\
+	u32 val;							\
+	int ret;							\
+									\
+	if (iscsit_get_tpg(tpg) < 0)					\
+		return -EINVAL;						\
+									\
+	ret = kstrtou32(page, 0, &val);					\
+	if (ret)							\
+		goto out;						\
+	ret = iscsit_ta_##name(tpg, val);				\
+	if (ret < 0)							\
+		goto out;						\
+									\
+	iscsit_put_tpg(tpg);						\
+	return count;							\
+out:									\
+	iscsit_put_tpg(tpg);						\
+	return ret;							\
+}									\
+CONFIGFS_ATTR(iscsi_tpg_attrib_, name)
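+
+/*
+ * Each DEF_TPG_ATTRIB(x) show/store pair pins the TPG via
+ * iscsit_get_tpg()/iscsit_put_tpg() around the access and delegates the
+ * actual update to a matching iscsit_ta_x() setter (implemented elsewhere
+ * in this stack, e.g. iscsit_ta_authentication() for "authentication").
+ */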
+
+DEF_TPG_ATTRIB(authentication);
+DEF_TPG_ATTRIB(login_timeout);
+DEF_TPG_ATTRIB(netif_timeout);
+DEF_TPG_ATTRIB(generate_node_acls);
+DEF_TPG_ATTRIB(default_cmdsn_depth);
+DEF_TPG_ATTRIB(cache_dynamic_acls);
+DEF_TPG_ATTRIB(demo_mode_write_protect);
+DEF_TPG_ATTRIB(prod_mode_write_protect);
+DEF_TPG_ATTRIB(demo_mode_discovery);
+DEF_TPG_ATTRIB(default_erl);
+DEF_TPG_ATTRIB(t10_pi);
+DEF_TPG_ATTRIB(fabric_prot_type);
+DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+DEF_TPG_ATTRIB(login_keys_workaround);
+
+static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
+	&iscsi_tpg_attrib_attr_authentication,
+	&iscsi_tpg_attrib_attr_login_timeout,
+	&iscsi_tpg_attrib_attr_netif_timeout,
+	&iscsi_tpg_attrib_attr_generate_node_acls,
+	&iscsi_tpg_attrib_attr_default_cmdsn_depth,
+	&iscsi_tpg_attrib_attr_cache_dynamic_acls,
+	&iscsi_tpg_attrib_attr_demo_mode_write_protect,
+	&iscsi_tpg_attrib_attr_prod_mode_write_protect,
+	&iscsi_tpg_attrib_attr_demo_mode_discovery,
+	&iscsi_tpg_attrib_attr_default_erl,
+	&iscsi_tpg_attrib_attr_t10_pi,
+	&iscsi_tpg_attrib_attr_fabric_prot_type,
+	&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
+	&iscsi_tpg_attrib_attr_login_keys_workaround,
+	NULL,
+};
+
+/* End items for lio_target_tpg_attrib_cit */
+
+/* Start items for lio_target_tpg_auth_cit */
+
+#define __DEF_TPG_AUTH_STR(prefix, name, flags)					\
+static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg,	\
+		char *page)							\
+{										\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,			\
+				struct iscsi_portal_group, tpg_se_tpg);		\
+	struct iscsi_node_auth *auth = &tpg->tpg_demo_auth;			\
+										\
+	if (!capable(CAP_SYS_ADMIN))						\
+		return -EPERM;							\
+										\
+	return snprintf(page, PAGE_SIZE, "%s\n", auth->name);			\
+}										\
+										\
+static ssize_t __iscsi_##prefix##_##name##_store(struct se_portal_group *se_tpg,\
+		const char *page, size_t count)					\
+{										\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,			\
+				struct iscsi_portal_group, tpg_se_tpg);		\
+	struct iscsi_node_auth *auth = &tpg->tpg_demo_auth;			\
+										\
+	if (!capable(CAP_SYS_ADMIN))						\
+		return -EPERM;							\
+										\
+	if (count >= sizeof(auth->name))					\
+		return -EINVAL;							\
+	snprintf(auth->name, sizeof(auth->name), "%s", page);			\
+	if (!strncmp("NULL", auth->name, 4))					\
+		auth->naf_flags &= ~flags;					\
+	else									\
+		auth->naf_flags |= flags;					\
+										\
+	if ((auth->naf_flags & NAF_USERID_IN_SET) &&				\
+	    (auth->naf_flags & NAF_PASSWORD_IN_SET))				\
+		auth->authenticate_target = 1;					\
+	else									\
+		auth->authenticate_target = 0;					\
+										\
+	return count;								\
+}
+
+#define DEF_TPG_AUTH_STR(name, flags)						\
+	__DEF_TPG_AUTH_STR(tpg_auth, name, flags)				\
+static ssize_t iscsi_tpg_auth_##name##_show(struct config_item *item,		\
+		char *page)							\
+{										\
+	return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page);		\
+}										\
+										\
+static ssize_t iscsi_tpg_auth_##name##_store(struct config_item *item,		\
+		const char *page, size_t count)					\
+{										\
+	return __iscsi_tpg_auth_##name##_store(auth_to_tpg(item), page, count);	\
+}										\
+										\
+CONFIGFS_ATTR(iscsi_tpg_auth_, name)
+
+DEF_TPG_AUTH_STR(userid, NAF_USERID_SET);
+DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET);
+DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+
+#define __DEF_TPG_AUTH_INT(prefix, name)					\
+static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg,	\
+		char *page)								\
+{										\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,			\
+				struct iscsi_portal_group, tpg_se_tpg);		\
+	struct iscsi_node_auth *auth = &tpg->tpg_demo_auth;			\
+										\
+	if (!capable(CAP_SYS_ADMIN))						\
+		return -EPERM;							\
+										\
+	return snprintf(page, PAGE_SIZE, "%d\n", auth->name);			\
+}
+
+#define DEF_TPG_AUTH_INT(name)							\
+	__DEF_TPG_AUTH_INT(tpg_auth, name)					\
+static ssize_t iscsi_tpg_auth_##name##_show(struct config_item *item,		\
+		char *page)							\
+{										\
+	return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page);		\
+}										\
+CONFIGFS_ATTR_RO(iscsi_tpg_auth_, name)
+
+DEF_TPG_AUTH_INT(authenticate_target);
+
+static struct configfs_attribute *lio_target_tpg_auth_attrs[] = {
+	&iscsi_tpg_auth_attr_userid,
+	&iscsi_tpg_auth_attr_password,
+	&iscsi_tpg_auth_attr_authenticate_target,
+	&iscsi_tpg_auth_attr_userid_mutual,
+	&iscsi_tpg_auth_attr_password_mutual,
+	NULL,
+};
+
+/* End items for lio_target_tpg_auth_cit */
+
+/* Start items for lio_target_tpg_param_cit */
+
+#define DEF_TPG_PARAM(name)						\
+static ssize_t iscsi_tpg_param_##name##_show(struct config_item *item,	\
+		char *page)						\
+{									\
+	struct se_portal_group *se_tpg = param_to_tpg(item);		\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);		\
+	struct iscsi_param *param;					\
+	ssize_t rb;							\
+									\
+	if (iscsit_get_tpg(tpg) < 0)					\
+		return -EINVAL;						\
+									\
+	param = iscsi_find_param_from_key(__stringify(name),		\
+				tpg->param_list);			\
+	if (!param) {							\
+		iscsit_put_tpg(tpg);					\
+		return -EINVAL;						\
+	}								\
+	rb = snprintf(page, PAGE_SIZE, "%s\n", param->value);		\
+									\
+	iscsit_put_tpg(tpg);						\
+	return rb;							\
+}									\
+static ssize_t iscsi_tpg_param_##name##_store(struct config_item *item, \
+		const char *page, size_t count)				\
+{									\
+	struct se_portal_group *se_tpg = param_to_tpg(item);		\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);		\
+	char *buf;							\
+	int ret, len;							\
+									\
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);				\
+	if (!buf)							\
+		return -ENOMEM;						\
+	len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page);	\
+	if (isspace(buf[len-1]))					\
+		buf[len-1] = '\0'; /* Kill newline */			\
+									\
+	if (iscsit_get_tpg(tpg) < 0) {					\
+		kfree(buf);						\
+		return -EINVAL;						\
+	}								\
+									\
+	ret = iscsi_change_param_value(buf, tpg->param_list, 1);	\
+	if (ret < 0)							\
+		goto out;						\
+									\
+	kfree(buf);							\
+	iscsit_put_tpg(tpg);						\
+	return count;							\
+out:									\
+	kfree(buf);							\
+	iscsit_put_tpg(tpg);						\
+	return -EINVAL;							\
+}									\
+CONFIGFS_ATTR(iscsi_tpg_param_, name)
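+
+/*
+ * The store side rebuilds the iSCSI key=value text form before handing it
+ * to the parameter list, so e.g. writing "CHAP" to the AuthMethod
+ * attribute results in iscsi_change_param_value("AuthMethod=CHAP", ...)
+ * against the TPG's parameter list.
+ */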
+
+DEF_TPG_PARAM(AuthMethod);
+DEF_TPG_PARAM(HeaderDigest);
+DEF_TPG_PARAM(DataDigest);
+DEF_TPG_PARAM(MaxConnections);
+DEF_TPG_PARAM(TargetAlias);
+DEF_TPG_PARAM(InitialR2T);
+DEF_TPG_PARAM(ImmediateData);
+DEF_TPG_PARAM(MaxRecvDataSegmentLength);
+DEF_TPG_PARAM(MaxXmitDataSegmentLength);
+DEF_TPG_PARAM(MaxBurstLength);
+DEF_TPG_PARAM(FirstBurstLength);
+DEF_TPG_PARAM(DefaultTime2Wait);
+DEF_TPG_PARAM(DefaultTime2Retain);
+DEF_TPG_PARAM(MaxOutstandingR2T);
+DEF_TPG_PARAM(DataPDUInOrder);
+DEF_TPG_PARAM(DataSequenceInOrder);
+DEF_TPG_PARAM(ErrorRecoveryLevel);
+DEF_TPG_PARAM(IFMarker);
+DEF_TPG_PARAM(OFMarker);
+DEF_TPG_PARAM(IFMarkInt);
+DEF_TPG_PARAM(OFMarkInt);
+
+static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
+	&iscsi_tpg_param_attr_AuthMethod,
+	&iscsi_tpg_param_attr_HeaderDigest,
+	&iscsi_tpg_param_attr_DataDigest,
+	&iscsi_tpg_param_attr_MaxConnections,
+	&iscsi_tpg_param_attr_TargetAlias,
+	&iscsi_tpg_param_attr_InitialR2T,
+	&iscsi_tpg_param_attr_ImmediateData,
+	&iscsi_tpg_param_attr_MaxRecvDataSegmentLength,
+	&iscsi_tpg_param_attr_MaxXmitDataSegmentLength,
+	&iscsi_tpg_param_attr_MaxBurstLength,
+	&iscsi_tpg_param_attr_FirstBurstLength,
+	&iscsi_tpg_param_attr_DefaultTime2Wait,
+	&iscsi_tpg_param_attr_DefaultTime2Retain,
+	&iscsi_tpg_param_attr_MaxOutstandingR2T,
+	&iscsi_tpg_param_attr_DataPDUInOrder,
+	&iscsi_tpg_param_attr_DataSequenceInOrder,
+	&iscsi_tpg_param_attr_ErrorRecoveryLevel,
+	&iscsi_tpg_param_attr_IFMarker,
+	&iscsi_tpg_param_attr_OFMarker,
+	&iscsi_tpg_param_attr_IFMarkInt,
+	&iscsi_tpg_param_attr_OFMarkInt,
+	NULL,
+};
+
+/* End items for lio_target_tpg_param_cit */
+
+/* Start items for lio_target_tpg_cit */
+
+static ssize_t lio_target_tpg_enable_show(struct config_item *item, char *page)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+	ssize_t len;
+
+	spin_lock(&tpg->tpg_state_lock);
+	len = sprintf(page, "%d\n",
+			(tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
+	spin_unlock(&tpg->tpg_state_lock);
+
+	return len;
+}
+
+static ssize_t lio_target_tpg_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+	u32 op;
+	int ret;
+
+	ret = kstrtou32(page, 0, &op);
+	if (ret)
+		return ret;
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %u\n", op);
+		return -EINVAL;
+	}
+
+	ret = iscsit_get_tpg(tpg);
+	if (ret < 0)
+		return -EINVAL;
+
+	if (op) {
+		ret = iscsit_tpg_enable_portal_group(tpg);
+		if (ret < 0)
+			goto out;
+	} else {
+		/*
+		 * iscsit_tpg_disable_portal_group() assumes force=1
+		 */
+		ret = iscsit_tpg_disable_portal_group(tpg, 1);
+		if (ret < 0)
+			goto out;
+	}
+
+	iscsit_put_tpg(tpg);
+	return count;
+out:
+	iscsit_put_tpg(tpg);
+	return -EINVAL;
+}
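+
+/*
+ * Example (illustrative): a TPG is brought online from userspace with
+ *
+ *   echo 1 > /sys/kernel/config/target/iscsi/$IQN/tpgt_1/enable
+ *
+ * and taken offline again by writing 0, which disables the portal group
+ * with force=1 as noted above.
+ */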
+
+static ssize_t lio_target_tpg_dynamic_sessions_show(struct config_item *item,
+		char *page)
+{
+	return target_show_dynamic_sessions(to_tpg(item), page);
+}
+
+CONFIGFS_ATTR(lio_target_tpg_, enable);
+CONFIGFS_ATTR_RO(lio_target_tpg_, dynamic_sessions);
+
+static struct configfs_attribute *lio_target_tpg_attrs[] = {
+	&lio_target_tpg_attr_enable,
+	&lio_target_tpg_attr_dynamic_sessions,
+	NULL,
+};
+
+/* End items for lio_target_tpg_cit */
+
+/* Start items for lio_target_tiqn_cit */
+
+static struct se_portal_group *lio_target_tiqn_addtpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tiqn *tiqn;
+	char *tpgt_str;
+	int ret;
+	u16 tpgt;
+
+	tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+	/*
+	 * Only tpgt_# directory groups can be created below
+	 * target/iscsi/iqn.superturbodiskarray/
+	 */
+	tpgt_str = strstr(name, "tpgt_");
+	if (!tpgt_str) {
+		pr_err("Unable to locate \"tpgt_#\" directory"
+				" group\n");
+		return NULL;
+	}
+	tpgt_str += 5; /* Skip past "tpgt_" */
+	ret = kstrtou16(tpgt_str, 0, &tpgt);
+	if (ret)
+		return NULL;
+
+	tpg = iscsit_alloc_portal_group(tiqn, tpgt);
+	if (!tpg)
+		return NULL;
+
+	ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+
+	ret = iscsit_tpg_add_portal_group(tiqn, tpg);
+	if (ret != 0)
+		goto out;
+
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
+			name);
+	return &tpg->tpg_se_tpg;
+out:
+	core_tpg_deregister(&tpg->tpg_se_tpg);
+	kfree(tpg);
+	return NULL;
+}
+
+static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tiqn *tiqn;
+
+	tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+	tiqn = tpg->tpg_tiqn;
+	/*
+	 * iscsit_tpg_del_portal_group() assumes force=1
+	 */
+	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
+	iscsit_tpg_del_portal_group(tiqn, tpg, 1);
+}
+
+/* End items for lio_target_tiqn_cit */
+
+/* Start LIO-Target TIQN struct config_item lio_target_cit */
+
+static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "Datera Inc. iSCSI Target "ISCSIT_VERSION"\n");
+}
+
+CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
+
+static struct configfs_attribute *lio_target_wwn_attrs[] = {
+	&lio_target_wwn_attr_lio_version,
+	NULL,
+};
+
+static struct se_wwn *lio_target_call_coreaddtiqn(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct config_group *stats_cg;
+	struct iscsi_tiqn *tiqn;
+
+	tiqn = iscsit_add_tiqn((unsigned char *)name);
+	if (IS_ERR(tiqn))
+		return ERR_CAST(tiqn);
+	/*
+	 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
+	 */
+	stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+
+	stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
+				GFP_KERNEL);
+	if (!stats_cg->default_groups) {
+		pr_err("Unable to allocate memory for"
+				" stats_cg->default_groups\n");
+		iscsit_del_tiqn(tiqn);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
+	stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
+	stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
+	stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
+	stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
+	stats_cg->default_groups[5] = NULL;
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
+			"iscsi_instance", &iscsi_stat_instance_cit);
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
+			"iscsi_sess_err", &iscsi_stat_sess_err_cit);
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
+			"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
+			"iscsi_login_stats", &iscsi_stat_login_cit);
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
+			"iscsi_logout_stats", &iscsi_stat_logout_cit);
+
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+			" %s\n", name);
+	return &tiqn->tiqn_wwn;
+}
+
+static void lio_target_call_coredeltiqn(
+	struct se_wwn *wwn)
+{
+	struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+	struct config_item *df_item;
+	struct config_group *stats_cg;
+	int i;
+
+	stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+	for (i = 0; stats_cg->default_groups[i]; i++) {
+		df_item = &stats_cg->default_groups[i]->cg_item;
+		stats_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(stats_cg->default_groups);
+
+	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
+			tiqn->tiqn);
+	iscsit_del_tiqn(tiqn);
+}
+
+/* End LIO-Target TIQN struct config_item lio_target_cit */
+
+/* Start lio_target_discovery_auth_cit */
+
+#define DEF_DISC_AUTH_STR(name, flags)					\
+	__DEF_NACL_AUTH_STR(disc, name, flags)				\
+static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \
+{									\
+	return __iscsi_disc_##name##_show(&iscsit_global->discovery_acl,\
+		page);							\
+}									\
+static ssize_t iscsi_disc_##name##_store(struct config_item *item,	\
+		const char *page, size_t count)				\
+{									\
+	return __iscsi_disc_##name##_store(&iscsit_global->discovery_acl,	\
+		page, count);						\
+									\
+}									\
+CONFIGFS_ATTR(iscsi_disc_, name)
+
+DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
+DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
+DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+
+#define DEF_DISC_AUTH_INT(name)						\
+	__DEF_NACL_AUTH_INT(disc, name)					\
+static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \
+{									\
+	return __iscsi_disc_##name##_show(&iscsit_global->discovery_acl, \
+			page);						\
+}									\
+CONFIGFS_ATTR_RO(iscsi_disc_, name)
+
+DEF_DISC_AUTH_INT(authenticate_target);
+
+static ssize_t iscsi_disc_enforce_discovery_auth_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
+
+	return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
+}
+
+static ssize_t iscsi_disc_enforce_discovery_auth_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct iscsi_param *param;
+	struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
+	u32 op;
+	int err;
+
+	err = kstrtou32(page, 0, &op);
+	if (err)
+		return -EINVAL;
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for enforce_discovery_auth:"
+				" %u\n", op);
+		return -EINVAL;
+	}
+
+	if (!discovery_tpg) {
+		pr_err("iscsit_global->discovery_tpg is NULL\n");
+		return -EINVAL;
+	}
+
+	param = iscsi_find_param_from_key(AUTHMETHOD,
+				discovery_tpg->param_list);
+	if (!param)
+		return -EINVAL;
+
+	if (op) {
+		/*
+		 * Reset the AuthMethod key to CHAP.
+		 */
+		if (iscsi_update_param_value(param, CHAP) < 0)
+			return -EINVAL;
+
+		discovery_tpg->tpg_attrib.authentication = 1;
+		iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
+		pr_debug("LIO-CORE[0] Successfully enabled"
+			" authentication enforcement for iSCSI"
+			" Discovery TPG\n");
+	} else {
+		/*
+		 * Reset the AuthMethod key to CHAP,None
+		 */
+		if (iscsi_update_param_value(param, "CHAP,None") < 0)
+			return -EINVAL;
+
+		discovery_tpg->tpg_attrib.authentication = 0;
+		iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
+		pr_debug("LIO-CORE[0] Successfully disabled"
+			" authentication enforcement for iSCSI"
+			" Discovery TPG\n");
+	}
+
+	return count;
+}
+
+CONFIGFS_ATTR(iscsi_disc_, enforce_discovery_auth);
+
+static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
+	&iscsi_disc_attr_userid,
+	&iscsi_disc_attr_password,
+	&iscsi_disc_attr_authenticate_target,
+	&iscsi_disc_attr_userid_mutual,
+	&iscsi_disc_attr_password_mutual,
+	&iscsi_disc_attr_enforce_discovery_auth,
+	NULL,
+};
+
+/* End lio_target_discovery_auth_cit */
+
+/* Start functions for target_core_fabric_ops */
+
+static char *iscsi_get_fabric_name(void)
+{
+	return "iSCSI";
+}
+
+static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	return cmd->i_state;
+}
+
+static u32 lio_sess_get_index(struct se_session *se_sess)
+{
+	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+	return sess->session_index;
+}
+
+static u32 lio_sess_get_initiator_sid(
+	struct se_session *se_sess,
+	unsigned char *buf,
+	u32 size)
+{
+	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+	/*
+	 * iSCSI Initiator Session Identifier from RFC-3720.
+	 */
+	return snprintf(buf, size, "%6phN", sess->isid);
+}
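+
+/*
+ * Note: "%6phN" is the kernel printk extension that renders the 6-byte
+ * ISID as 12 contiguous hex digits with no separators.
+ */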
+
+static int lio_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->i_state = ISTATE_SEND_DATAIN;
+	cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
+
+	return 0;
+}
+
+static int lio_write_pending(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_conn *conn = cmd->conn;
+
+	if (!cmd->immediate_data && !cmd->unsolicited_data)
+		return conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
+
+	return 0;
+}
+
+static int lio_write_pending_status(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	int ret;
+
+	spin_lock_bh(&cmd->istate_lock);
+	ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
+	spin_unlock_bh(&cmd->istate_lock);
+
+	return ret;
+}
+
+static int lio_queue_status(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->i_state = ISTATE_SEND_STATUS;
+
+	if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
+		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+		return 0;
+	}
+	cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
+
+	return 0;
+}
+
+static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+}
+
+static void lio_aborted_task(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
+}
+
+static inline struct iscsi_portal_group *iscsi_tpg(struct se_portal_group *se_tpg)
+{
+	return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+}
+
+static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+	return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
+}
+
+static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
+{
+	return iscsi_tpg(se_tpg)->tpgt;
+}
+
+static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
+}
+
+static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
+{
+	return iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
+}
+
+static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+	return iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
+}
+
+static int lio_tpg_check_demo_mode_write_protect(
+	struct se_portal_group *se_tpg)
+{
+	return iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
+}
+
+static int lio_tpg_check_prod_mode_write_protect(
+	struct se_portal_group *se_tpg)
+{
+	return iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
+}
+
+static int lio_tpg_check_prot_fabric_only(
+	struct se_portal_group *se_tpg)
+{
+	/*
+	 * Only report fabric_prot_type if t10_pi has also been enabled
+	 * for incoming ib_isert sessions.
+	 */
+	if (!iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
+		return 0;
+	return iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
+}
+
+/*
+ * Mark the struct iscsi_session in question for reinstatement and stop
+ * it, unless a logout or Time2Retain expiration already has the shutdown
+ * in hand.
+ */
+static int lio_tpg_shutdown_session(struct se_session *se_sess)
+{
+	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+	struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	spin_lock(&sess->conn_lock);
+	if (atomic_read(&sess->session_fall_back_to_erl0) ||
+	    atomic_read(&sess->session_logout) ||
+	    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+		spin_unlock(&sess->conn_lock);
+		spin_unlock_bh(&se_tpg->session_lock);
+		return 0;
+	}
+	atomic_set(&sess->session_reinstatement, 1);
+	atomic_set(&sess->session_fall_back_to_erl0, 1);
+	spin_unlock(&sess->conn_lock);
+
+	iscsit_stop_time2retain_timer(sess);
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	iscsit_stop_session(sess, 1, 1);
+	return 1;
+}
+
+/*
+ * Inverse of lio_tpg_shutdown_session(): release the struct iscsi_session
+ * once the target core is finished with it.
+ */
+static void lio_tpg_close_session(struct se_session *se_sess)
+{
+	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+	/*
+	 * If the iSCSI Session for the iSCSI Initiator Node exists,
+	 * forcefully shutdown the iSCSI NEXUS.
+	 */
+	iscsit_close_session(sess);
+}
+
+static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
+}
+
+static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+	struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
+				se_node_acl);
+	struct se_portal_group *se_tpg = se_acl->se_tpg;
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+				struct iscsi_portal_group, tpg_se_tpg);
+
+	acl->node_attrib.nacl = acl;
+	iscsit_set_default_node_attribues(acl, tpg);
+}
+
+static int lio_check_stop_free(struct se_cmd *se_cmd)
+{
+	return target_put_sess_cmd(se_cmd);
+}
+
+static void lio_release_cmd(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
+	iscsit_release_cmd(cmd);
+}
+
+const struct target_core_fabric_ops iscsi_ops = {
+	.module				= THIS_MODULE,
+	.name				= "iscsi",
+	.node_acl_size			= sizeof(struct iscsi_node_acl),
+	.get_fabric_name		= iscsi_get_fabric_name,
+	.tpg_get_wwn			= lio_tpg_get_endpoint_wwn,
+	.tpg_get_tag			= lio_tpg_get_tag,
+	.tpg_get_default_depth		= lio_tpg_get_default_depth,
+	.tpg_check_demo_mode		= lio_tpg_check_demo_mode,
+	.tpg_check_demo_mode_cache	= lio_tpg_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect =
+			lio_tpg_check_demo_mode_write_protect,
+	.tpg_check_prod_mode_write_protect =
+			lio_tpg_check_prod_mode_write_protect,
+	.tpg_check_prot_fabric_only	= &lio_tpg_check_prot_fabric_only,
+	.tpg_get_inst_index		= lio_tpg_get_inst_index,
+	.check_stop_free		= lio_check_stop_free,
+	.release_cmd			= lio_release_cmd,
+	.shutdown_session		= lio_tpg_shutdown_session,
+	.close_session			= lio_tpg_close_session,
+	.sess_get_index			= lio_sess_get_index,
+	.sess_get_initiator_sid		= lio_sess_get_initiator_sid,
+	.write_pending			= lio_write_pending,
+	.write_pending_status		= lio_write_pending_status,
+	.set_default_node_attributes	= lio_set_default_node_attributes,
+	.get_cmd_state			= iscsi_get_cmd_state,
+	.queue_data_in			= lio_queue_data_in,
+	.queue_status			= lio_queue_status,
+	.queue_tm_rsp			= lio_queue_tm_rsp,
+	.aborted_task			= lio_aborted_task,
+	.fabric_make_wwn		= lio_target_call_coreaddtiqn,
+	.fabric_drop_wwn		= lio_target_call_coredeltiqn,
+	.fabric_make_tpg		= lio_target_tiqn_addtpg,
+	.fabric_drop_tpg		= lio_target_tiqn_deltpg,
+	.fabric_make_np			= lio_target_call_addnptotpg,
+	.fabric_drop_np			= lio_target_call_delnpfromtpg,
+	.fabric_init_nodeacl		= lio_target_init_nodeacl,
+	.fabric_cleanup_nodeacl		= lio_target_cleanup_nodeacl,
+
+	.tfc_discovery_attrs		= lio_target_discovery_auth_attrs,
+	.tfc_wwn_attrs			= lio_target_wwn_attrs,
+	.tfc_tpg_base_attrs		= lio_target_tpg_attrs,
+	.tfc_tpg_attrib_attrs		= lio_target_tpg_attrib_attrs,
+	.tfc_tpg_auth_attrs		= lio_target_tpg_auth_attrs,
+	.tfc_tpg_param_attrs		= lio_target_tpg_param_attrs,
+	.tfc_tpg_np_base_attrs		= lio_target_portal_attrs,
+	.tfc_tpg_nacl_base_attrs	= lio_target_initiator_attrs,
+	.tfc_tpg_nacl_attrib_attrs	= lio_target_nacl_attrib_attrs,
+	.tfc_tpg_nacl_auth_attrs	= lio_target_nacl_auth_attrs,
+	.tfc_tpg_nacl_param_attrs	= lio_target_nacl_param_attrs,
+};
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
new file mode 100644
index 0000000..fb3b52b
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -0,0 +1,526 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target DataIN value generation functions.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_datain_values.h"
+
+struct iscsi_datain_req *iscsit_allocate_datain_req(void)
+{
+	struct iscsi_datain_req *dr;
+
+	dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
+	if (!dr) {
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_datain_req\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&dr->cmd_datain_node);
+
+	return dr;
+}
+
+void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+	spin_lock(&cmd->datain_lock);
+	list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
+	spin_unlock(&cmd->datain_lock);
+}
+
+void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+	spin_lock(&cmd->datain_lock);
+	list_del(&dr->cmd_datain_node);
+	spin_unlock(&cmd->datain_lock);
+
+	kmem_cache_free(lio_dr_cache, dr);
+}
+
+void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
+{
+	struct iscsi_datain_req *dr, *dr_tmp;
+
+	spin_lock(&cmd->datain_lock);
+	list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) {
+		list_del(&dr->cmd_datain_node);
+		kmem_cache_free(lio_dr_cache, dr);
+	}
+	spin_unlock(&cmd->datain_lock);
+}
+
+struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
+{
+	if (list_empty(&cmd->datain_list)) {
+		pr_err("cmd->datain_list is empty for ITT:"
+			" 0x%08x\n", cmd->init_task_tag);
+		return NULL;
+	}
+
+	return list_first_entry(&cmd->datain_list, struct iscsi_datain_req,
+				cmd_datain_node);
+}
+
+/*
+ *	For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	u32 next_burst_len, read_data_done, read_data_left;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+
+	dr = iscsit_get_datain_req(cmd);
+	if (!dr)
+		return NULL;
+
+	if (dr->recovery && dr->generate_recovery_values) {
+		if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+					cmd, dr) < 0)
+			return NULL;
+
+		dr->generate_recovery_values = 0;
+	}
+
+	next_burst_len = (!dr->recovery) ?
+			cmd->next_burst_len : dr->next_burst_len;
+	read_data_done = (!dr->recovery) ?
+			cmd->read_data_done : dr->read_data_done;
+
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
+	if (!read_data_left) {
+		pr_err("ITT: 0x%08x read_data_left is zero!\n",
+				cmd->init_task_tag);
+		return NULL;
+	}
+
+	if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
+	    (read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
+	     next_burst_len))) {
+		datain->length = read_data_left;
+
+		datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+			datain->flags |= ISCSI_FLAG_DATA_ACK;
+	} else {
+		if ((next_burst_len +
+		     conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength) {
+			datain->length =
+				conn->conn_ops->MaxRecvDataSegmentLength;
+			next_burst_len += datain->length;
+		} else {
+			datain->length = (conn->sess->sess_ops->MaxBurstLength -
+					  next_burst_len);
+			next_burst_len = 0;
+
+			datain->flags |= ISCSI_FLAG_CMD_FINAL;
+			if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+				datain->flags |= ISCSI_FLAG_DATA_ACK;
+		}
+	}
+
+	datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+	datain->offset = read_data_done;
+
+	if (!dr->recovery) {
+		cmd->next_burst_len = next_burst_len;
+		cmd->read_data_done += datain->length;
+	} else {
+		dr->next_burst_len = next_burst_len;
+		dr->read_data_done += datain->length;
+	}
+
+	if (!dr->recovery) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+			dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+		return dr;
+	}
+
+	if (!dr->runlength) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	} else {
+		if ((dr->begrun + dr->runlength) == dr->data_sn) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	}
+
+	return dr;
+}
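+
+/*
+ * Note on the recovery completion checks above, which are shared by all
+ * four DataIN variants in this file: dr->begrun and dr->runlength mirror
+ * the BegRun/RunLength fields of the initiator's SNACK (RFC 3720), so a
+ * recovery stream with an explicit runlength completes once data_sn
+ * reaches begrun + runlength.
+ */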
+
+/*
+ *	For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	u32 offset, read_data_done, read_data_left, seq_send_order;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct iscsi_seq *seq;
+
+	dr = iscsit_get_datain_req(cmd);
+	if (!dr)
+		return NULL;
+
+	if (dr->recovery && dr->generate_recovery_values) {
+		if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+					cmd, dr) < 0)
+			return NULL;
+
+		dr->generate_recovery_values = 0;
+	}
+
+	read_data_done = (!dr->recovery) ?
+			cmd->read_data_done : dr->read_data_done;
+	seq_send_order = (!dr->recovery) ?
+			cmd->seq_send_order : dr->seq_send_order;
+
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
+	if (!read_data_left) {
+		pr_err("ITT: 0x%08x read_data_left is zero!\n",
+				cmd->init_task_tag);
+		return NULL;
+	}
+
+	seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+	if (!seq)
+		return NULL;
+
+	seq->sent = 1;
+
+	if (!dr->recovery && !seq->next_burst_len)
+		seq->first_datasn = cmd->data_sn;
+
+	offset = (seq->offset + seq->next_burst_len);
+
+	if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+	     cmd->se_cmd.data_length) {
+		datain->length = (cmd->se_cmd.data_length - offset);
+		datain->offset = offset;
+
+		datain->flags |= ISCSI_FLAG_CMD_FINAL;
+		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+			datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+		seq->next_burst_len = 0;
+		seq_send_order++;
+	} else {
+		if ((seq->next_burst_len +
+		     conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength) {
+			datain->length =
+				conn->conn_ops->MaxRecvDataSegmentLength;
+			datain->offset = (seq->offset + seq->next_burst_len);
+
+			seq->next_burst_len += datain->length;
+		} else {
+			datain->length = (conn->sess->sess_ops->MaxBurstLength -
+					  seq->next_burst_len);
+			datain->offset = (seq->offset + seq->next_burst_len);
+
+			datain->flags |= ISCSI_FLAG_CMD_FINAL;
+			if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+				datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+			seq->next_burst_len = 0;
+			seq_send_order++;
+		}
+	}
+
+	if ((read_data_done + datain->length) == cmd->se_cmd.data_length)
+		datain->flags |= ISCSI_FLAG_DATA_STATUS;
+
+	datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+	if (!dr->recovery) {
+		cmd->seq_send_order = seq_send_order;
+		cmd->read_data_done += datain->length;
+	} else {
+		dr->seq_send_order = seq_send_order;
+		dr->read_data_done += datain->length;
+	}
+
+	if (!dr->recovery) {
+		if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+			seq->last_datasn = datain->data_sn;
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+			dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+		return dr;
+	}
+
+	if (!dr->runlength) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	} else {
+		if ((dr->begrun + dr->runlength) == dr->data_sn) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	}
+
+	return dr;
+}
+
+/*
+ *	For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	u32 next_burst_len, read_data_done, read_data_left;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct iscsi_pdu *pdu;
+
+	dr = iscsit_get_datain_req(cmd);
+	if (!dr)
+		return NULL;
+
+	if (dr->recovery && dr->generate_recovery_values) {
+		if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+					cmd, dr) < 0)
+			return NULL;
+
+		dr->generate_recovery_values = 0;
+	}
+
+	next_burst_len = (!dr->recovery) ?
+			cmd->next_burst_len : dr->next_burst_len;
+	read_data_done = (!dr->recovery) ?
+			cmd->read_data_done : dr->read_data_done;
+
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
+	if (!read_data_left) {
+		pr_err("ITT: 0x%08x read_data_left is zero!\n",
+				cmd->init_task_tag);
+		return dr;
+	}
+
+	pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
+	if (!pdu)
+		return dr;
+
+	if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) {
+		pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+			pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+		next_burst_len = 0;
+	} else {
+		if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength)
+			next_burst_len += pdu->length;
+		else {
+			pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+			if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+				pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+			next_burst_len = 0;
+		}
+	}
+
+	pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+	if (!dr->recovery) {
+		cmd->next_burst_len = next_burst_len;
+		cmd->read_data_done += pdu->length;
+	} else {
+		dr->next_burst_len = next_burst_len;
+		dr->read_data_done += pdu->length;
+	}
+
+	datain->flags = pdu->flags;
+	datain->length = pdu->length;
+	datain->offset = pdu->offset;
+	datain->data_sn = pdu->data_sn;
+
+	if (!dr->recovery) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+			dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+		return dr;
+	}
+
+	if (!dr->runlength) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	} else {
+		if ((dr->begrun + dr->runlength) == dr->data_sn) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	}
+
+	return dr;
+}
+
+/*
+ *	For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	u32 read_data_done, read_data_left, seq_send_order;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct iscsi_pdu *pdu;
+	struct iscsi_seq *seq = NULL;
+
+	dr = iscsit_get_datain_req(cmd);
+	if (!dr)
+		return NULL;
+
+	if (dr->recovery && dr->generate_recovery_values) {
+		if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+					cmd, dr) < 0)
+			return NULL;
+
+		dr->generate_recovery_values = 0;
+	}
+
+	read_data_done = (!dr->recovery) ?
+			cmd->read_data_done : dr->read_data_done;
+	seq_send_order = (!dr->recovery) ?
+			cmd->seq_send_order : dr->seq_send_order;
+
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
+	if (!read_data_left) {
+		pr_err("ITT: 0x%08x read_data_left is zero!\n",
+				cmd->init_task_tag);
+		return NULL;
+	}
+
+	seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+	if (!seq)
+		return NULL;
+
+	seq->sent = 1;
+
+	if (!dr->recovery && !seq->next_burst_len)
+		seq->first_datasn = cmd->data_sn;
+
+	pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
+	if (!pdu)
+		return NULL;
+
+	if (seq->pdu_send_order == seq->pdu_count) {
+		pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+			pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+		seq->next_burst_len = 0;
+		seq_send_order++;
+	} else
+		seq->next_burst_len += pdu->length;
+
+	if ((read_data_done + pdu->length) == cmd->se_cmd.data_length)
+		pdu->flags |= ISCSI_FLAG_DATA_STATUS;
+
+	pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+	if (!dr->recovery) {
+		cmd->seq_send_order = seq_send_order;
+		cmd->read_data_done += pdu->length;
+	} else {
+		dr->seq_send_order = seq_send_order;
+		dr->read_data_done += pdu->length;
+	}
+
+	datain->flags = pdu->flags;
+	datain->length = pdu->length;
+	datain->offset = pdu->offset;
+	datain->data_sn = pdu->data_sn;
+
+	if (!dr->recovery) {
+		if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+			seq->last_datasn = datain->data_sn;
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+			dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+		return dr;
+	}
+
+	if (!dr->runlength) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	} else {
+		if ((dr->begrun + dr->runlength) == dr->data_sn) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	}
+
+	return dr;
+}
+
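+/*
+ * Dispatch on the negotiated (DataSequenceInOrder, DataPDUInOrder) pair:
+ *
+ *   Yes/Yes -> iscsit_set_datain_values_yes_and_yes()
+ *   No/Yes  -> iscsit_set_datain_values_no_and_yes()
+ *   Yes/No  -> iscsit_set_datain_values_yes_and_no()
+ *   No/No   -> iscsit_set_datain_values_no_and_no()
+ */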
+struct iscsi_datain_req *iscsit_get_datain_values(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	struct iscsi_conn *conn = cmd->conn;
+
+	if (conn->sess->sess_ops->DataSequenceInOrder &&
+	    conn->sess->sess_ops->DataPDUInOrder)
+		return iscsit_set_datain_values_yes_and_yes(cmd, datain);
+	else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+		  conn->sess->sess_ops->DataPDUInOrder)
+		return iscsit_set_datain_values_no_and_yes(cmd, datain);
+	else if (conn->sess->sess_ops->DataSequenceInOrder &&
+		 !conn->sess->sess_ops->DataPDUInOrder)
+		return iscsit_set_datain_values_yes_and_no(cmd, datain);
+	else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+		   !conn->sess->sess_ops->DataPDUInOrder)
+		return iscsit_set_datain_values_no_and_no(cmd, datain);
+
+	return NULL;
+}
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
new file mode 100644
index 0000000..646429a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_DATAIN_VALUES_H
+#define ISCSI_TARGET_DATAIN_VALUES_H
+
+extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
+extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
+			struct iscsi_datain *);
+
+#endif   /*** ISCSI_TARGET_DATAIN_VALUES_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
new file mode 100644
index 0000000..0382fa2
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -0,0 +1,65 @@
+/*******************************************************************************
+ * This file contains the iSCSI Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+
+void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
+{
+	struct se_node_acl *se_nacl;
+
+	/*
+	 * This is a discovery session; the single queue slot was already
+	 * assigned in iscsi_login_zero_tsih().  Since only Logout and
+	 * Text opcodes are allowed during discovery, we do not have to worry
+	 * about the HBA's queue depth here.
+	 */
+	if (sess->sess_ops->SessionType)
+		return;
+
+	se_nacl = sess->se_sess->se_node_acl;
+
+	/*
+	 * This is a normal session, set the Session's CmdSN window to the
+	 * struct se_node_acl->queue_depth.  The value in struct se_node_acl->queue_depth
+	 * has already been validated as a legal value in
+	 * core_set_queue_depth_for_node().
+	 */
+	sess->cmdsn_window = se_nacl->queue_depth;
+	atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
+}
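+
+/*
+ *	Example for the above (illustrative queue depth, not from the source):
+ *	for a normal session whose se_node_acl->queue_depth is 64, cmdsn_window
+ *	becomes 64 and MaxCmdSN advances by 63 beyond the single slot assigned
+ *	at login, allowing 64 outstanding CmdSNs in total.
+ */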
+
+void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
+{
+	u32 max_cmd_sn;
+
+	if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
+		return;
+
+	cmd->maxcmdsn_inc = 1;
+
+	max_cmd_sn = atomic_inc_return(&sess->max_cmd_sn);
+	pr_debug("Updated MaxCmdSN to 0x%08x\n", max_cmd_sn);
+}
+EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
new file mode 100644
index 0000000..a0e2df9
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -0,0 +1,7 @@
+#ifndef ISCSI_TARGET_DEVICE_H
+#define ISCSI_TARGET_DEVICE_H
+
+extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+
+#endif /* ISCSI_TARGET_DEVICE_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
new file mode 100644
index 0000000..4eeb82c
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -0,0 +1,960 @@
+/******************************************************************************
+ * This file contains error recovery level zero functions used by
+ * the iSCSI Target driver.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+/*
+ *	Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
+ *	checks against to determine whether a PDU's Offset+Length falls within
+ *	the current DataOUT Sequence.  Used for DataSequenceInOrder=Yes only.
+ */
+void iscsit_set_dataout_sequence_values(
+	struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	/*
+	 * Still set seq_start_offset and seq_end_offset for Unsolicited
+	 * DataOUT, even if DataSequenceInOrder=No.
+	 */
+	if (cmd->unsolicited_data) {
+		cmd->seq_start_offset = cmd->write_data_done;
+		cmd->seq_end_offset = min(cmd->se_cmd.data_length,
+					conn->sess->sess_ops->FirstBurstLength);
+		return;
+	}
+
+	if (!conn->sess->sess_ops->DataSequenceInOrder)
+		return;
+
+	if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
+		cmd->seq_start_offset = cmd->write_data_done;
+		cmd->seq_end_offset = (cmd->se_cmd.data_length >
+			conn->sess->sess_ops->MaxBurstLength) ?
+			(cmd->write_data_done +
+			conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length;
+	} else {
+		cmd->seq_start_offset = cmd->seq_end_offset;
+		cmd->seq_end_offset = ((cmd->seq_end_offset +
+			conn->sess->sess_ops->MaxBurstLength) >=
+			cmd->se_cmd.data_length) ? cmd->se_cmd.data_length :
+			(cmd->seq_end_offset +
+			 conn->sess->sess_ops->MaxBurstLength);
+	}
+}
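+
+/*
+ *	Worked example for the above (illustrative values, assuming no
+ *	unsolicited data): with DataSequenceInOrder=Yes, MaxBurstLength=65536
+ *	and a 163840 byte WRITE, successive calls yield the DataOUT sequence
+ *	windows [0,65536), [65536,131072) and [131072,163840).
+ */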
+
+static int iscsit_dataout_within_command_recovery_check(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	/*
+	 * We do the within-command recovery checks here as it is
+	 * the first function called in iscsit_check_pre_dataout().
+	 * Basically, if we are in within-command recovery and
+	 * the PDU does not contain the offset the sequence needs,
+	 * dump the payload.
+	 *
+	 * This only applies to DataPDUInOrder=Yes; for
+	 * DataPDUInOrder=No we only re-request the failed PDU
+	 * and check that all PDUs in a sequence are received
+	 * upon end of sequence.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
+		    cmd->write_data_done != be32_to_cpu(hdr->offset))
+			goto dump;
+
+		cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
+	} else {
+		struct iscsi_seq *seq;
+
+		seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
+					    payload_length);
+		if (!seq)
+			return DATAOUT_CANNOT_RECOVER;
+		/*
+		 * Set the struct iscsi_seq pointer to reuse later.
+		 */
+		cmd->seq_ptr = seq;
+
+		if (conn->sess->sess_ops->DataPDUInOrder) {
+			if (seq->status ==
+			    DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY &&
+			   (seq->offset != be32_to_cpu(hdr->offset) ||
+			    seq->data_sn != be32_to_cpu(hdr->datasn)))
+				goto dump;
+		} else {
+			if (seq->status ==
+			     DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY &&
+			    seq->data_sn != be32_to_cpu(hdr->datasn))
+				goto dump;
+		}
+
+		if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
+			goto dump;
+
+		seq->status = 0;
+	}
+
+	return DATAOUT_NORMAL;
+
+dump:
+	pr_err("Dumping DataOUT PDU Offset: %u Length: %u DataSN:"
+		" 0x%08x\n", be32_to_cpu(hdr->offset), payload_length,
+		be32_to_cpu(hdr->datasn));
+	return iscsit_dump_data_payload(conn, payload_length, 1);
+}
+
+static int iscsit_dataout_check_unsolicited_sequence(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	u32 first_burst_len;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
+	   ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
+		pr_err("Command ITT: 0x%08x with Offset: %u,"
+		" Length: %u outside of Unsolicited Sequence %u:%u while"
+		" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+		be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
+			cmd->seq_end_offset);
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	first_burst_len = (cmd->first_burst_len + payload_length);
+
+	if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
+		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+			" for this Unsolicited DataOut Burst.\n",
+			first_burst_len, conn->sess->sess_ops->FirstBurstLength);
+		transport_send_check_condition_and_sense(&cmd->se_cmd,
+				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	/*
+	 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+	 * checks for the current Unsolicited DataOUT Sequence.
+	 */
+	if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+		/*
+		 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No;
+		 * end-of-sequence checks are handled in
+		 * iscsit_dataout_datapduinorder_no_fbit().
+		 */
+		if (!conn->sess->sess_ops->DataPDUInOrder)
+			goto out;
+
+		if ((first_burst_len != cmd->se_cmd.data_length) &&
+		    (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
+			pr_err("Unsolicited non-immediate data"
+			" received %u does not equal FirstBurstLength: %u, and"
+			" does not equal ExpXferLen %u.\n", first_burst_len,
+				conn->sess->sess_ops->FirstBurstLength,
+				cmd->se_cmd.data_length);
+			transport_send_check_condition_and_sense(&cmd->se_cmd,
+					TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+			return DATAOUT_CANNOT_RECOVER;
+		}
+	} else {
+		if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
+			pr_err("Command ITT: 0x%08x reached"
+			" FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set,"
+			" protocol error.\n", cmd->init_task_tag,
+				conn->sess->sess_ops->FirstBurstLength);
+			return DATAOUT_CANNOT_RECOVER;
+		}
+		if (first_burst_len == cmd->se_cmd.data_length) {
+			pr_err("Command ITT: 0x%08x reached"
+			" ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set,"
+			" protocol error.\n", cmd->init_task_tag, cmd->se_cmd.data_length);
+			return DATAOUT_CANNOT_RECOVER;
+		}
+	}
+
+out:
+	return DATAOUT_NORMAL;
+}
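+
+/*
+ *	Example of the ISCSI_FLAG_CMD_FINAL checks above (illustrative values):
+ *	with FirstBurstLength=65536 and DataPDUInOrder=Yes, the final
+ *	unsolicited PDU must leave first_burst_len equal to either the full
+ *	ExpXferLen (for a WRITE smaller than 65536) or exactly 65536; anything
+ *	else is treated as an unrecoverable protocol error.
+ */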
+
+static int iscsit_dataout_check_sequence(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	u32 next_burst_len;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_seq *seq = NULL;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	/*
+	 * For DataSequenceInOrder=Yes: Check that the offset and offset+length
+	 * is within range as defined by iscsi_set_dataout_sequence_values().
+	 *
+	 * For DataSequenceInOrder=No: Check that an struct iscsi_seq exists for
+	 * offset+length tuple.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		/*
+		 * Because the initiator may be sending recovery DataOUT that
+		 * fulfills a Recovery R2T, it's best to just dump the payload
+		 * here instead of erroring out.
+		 */
+		if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
+		   ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
+			pr_err("Command ITT: 0x%08x with Offset: %u,"
+			" Length: %u outside of Sequence %u:%u while"
+			" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+			be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
+				cmd->seq_end_offset);
+
+			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+				return DATAOUT_CANNOT_RECOVER;
+			return DATAOUT_WITHIN_COMMAND_RECOVERY;
+		}
+
+		next_burst_len = (cmd->next_burst_len + payload_length);
+	} else {
+		seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
+					    payload_length);
+		if (!seq)
+			return DATAOUT_CANNOT_RECOVER;
+		/*
+		 * Set the struct iscsi_seq pointer to reuse later.
+		 */
+		cmd->seq_ptr = seq;
+
+		if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
+			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+				return DATAOUT_CANNOT_RECOVER;
+			return DATAOUT_WITHIN_COMMAND_RECOVERY;
+		}
+
+		next_burst_len = (seq->next_burst_len + payload_length);
+	}
+
+	if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
+		pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
+			" Length: %u exceeds MaxBurstLength: %u. protocol"
+			" error.\n", cmd->init_task_tag,
+			(next_burst_len - payload_length),
+			payload_length, conn->sess->sess_ops->MaxBurstLength);
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	/*
+	 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+	 * checks for the current DataOUT Sequence.
+	 */
+	if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+		/*
+		 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No;
+		 * end-of-sequence checks are handled in
+		 * iscsit_dataout_datapduinorder_no_fbit().
+		 */
+		if (!conn->sess->sess_ops->DataPDUInOrder)
+			goto out;
+
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if ((next_burst_len <
+			     conn->sess->sess_ops->MaxBurstLength) &&
+			   ((cmd->write_data_done + payload_length) <
+			     cmd->se_cmd.data_length)) {
+				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+				" before end of DataOUT sequence, protocol"
+				" error.\n", cmd->init_task_tag);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+		} else {
+			if (next_burst_len < seq->xfer_len) {
+				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+				" before end of DataOUT sequence, protocol"
+				" error.\n", cmd->init_task_tag);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+		}
+	} else {
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if (next_burst_len ==
+					conn->sess->sess_ops->MaxBurstLength) {
+				pr_err("Command ITT: 0x%08x reached"
+				" MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
+				" not set, protocol error.\n", cmd->init_task_tag,
+					conn->sess->sess_ops->MaxBurstLength);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+			if ((cmd->write_data_done + payload_length) ==
+					cmd->se_cmd.data_length) {
+				pr_err("Command ITT: 0x%08x reached"
+				" last DataOUT PDU in sequence but ISCSI_FLAG_"
+				"CMD_FINAL is not set, protocol error.\n",
+					cmd->init_task_tag);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+		} else {
+			if (next_burst_len == seq->xfer_len) {
+				pr_err("Command ITT: 0x%08x reached"
+				" last DataOUT PDU in sequence but ISCSI_FLAG_"
+				"CMD_FINAL is not set, protocol error.\n",
+					cmd->init_task_tag);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+		}
+	}
+
+out:
+	return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_datasn(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	u32 data_sn = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	/*
+	 * Considering the target has no method of re-requesting DataOUT
+	 * by DataSN, if we receive a greater DataSN than expected we
+	 * assume the functions for DataPDUInOrder=[Yes,No] below will
+	 * handle it.
+	 *
+	 * If the DataSN is less than expected, dump the payload.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		data_sn = cmd->data_sn;
+	} else {
+		struct iscsi_seq *seq = cmd->seq_ptr;
+
+		data_sn = seq->data_sn;
+	}
+
+	if (be32_to_cpu(hdr->datasn) > data_sn) {
+		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+			" higher than expected 0x%08x.\n", cmd->init_task_tag,
+				be32_to_cpu(hdr->datasn), data_sn);
+		goto recover;
+	} else if (be32_to_cpu(hdr->datasn) < data_sn) {
+		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+			" lower than expected 0x%08x, discarding payload.\n",
+			cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
+		goto dump;
+	}
+
+	return DATAOUT_NORMAL;
+
+recover:
+	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+		pr_err("Unable to perform within-command recovery"
+				" while ERL=0.\n");
+		return DATAOUT_CANNOT_RECOVER;
+	}
+dump:
+	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+		return DATAOUT_CANNOT_RECOVER;
+
+	return DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
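+
+/*
+ *	Example (illustrative values): if the next expected DataSN is 0x3 and
+ *	the initiator sends DataSN 0x5, within-command recovery is attempted,
+ *	which is only possible when ErrorRecoveryLevel > 0; if it instead sends
+ *	the stale DataSN 0x2, the payload is simply discarded and
+ *	DATAOUT_WITHIN_COMMAND_RECOVERY is returned.
+ */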
+
+static int iscsit_dataout_pre_datapduinorder_yes(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	int dump = 0, recovery = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	/*
+	 * For DataSequenceInOrder=Yes: If the offset is greater than the global
+	 * DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protocol error has
+	 * occurred and fail the connection.
+	 *
+	 * For DataSequenceInOrder=No: If the offset is greater than the per
+	 * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq a protocol
+	 * error has occurred and fail the connection.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		if (be32_to_cpu(hdr->offset) != cmd->write_data_done) {
+			pr_err("Command ITT: 0x%08x, received offset"
+			" %u different than expected %u.\n", cmd->init_task_tag,
+				be32_to_cpu(hdr->offset), cmd->write_data_done);
+			recovery = 1;
+			goto recover;
+		}
+	} else {
+		struct iscsi_seq *seq = cmd->seq_ptr;
+
+		if (be32_to_cpu(hdr->offset) > seq->offset) {
+			pr_err("Command ITT: 0x%08x, received offset"
+			" %u greater than expected %u.\n", cmd->init_task_tag,
+				be32_to_cpu(hdr->offset), seq->offset);
+			recovery = 1;
+			goto recover;
+		} else if (be32_to_cpu(hdr->offset) < seq->offset) {
+			pr_err("Command ITT: 0x%08x, received offset"
+			" %u less than expected %u, discarding payload.\n",
+				cmd->init_task_tag, be32_to_cpu(hdr->offset),
+				seq->offset);
+			dump = 1;
+			goto dump;
+		}
+	}
+
+	return DATAOUT_NORMAL;
+
+recover:
+	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+		pr_err("Unable to perform within-command recovery"
+				" while ERL=0.\n");
+		return DATAOUT_CANNOT_RECOVER;
+	}
+dump:
+	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+		return DATAOUT_CANNOT_RECOVER;
+
+	return (recovery) ? iscsit_recover_dataout_sequence(cmd,
+		be32_to_cpu(hdr->offset), payload_length) :
+	       (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_pre_datapduinorder_no(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_pdu *pdu;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	pdu = iscsit_get_pdu_holder(cmd, be32_to_cpu(hdr->offset),
+				    payload_length);
+	if (!pdu)
+		return DATAOUT_CANNOT_RECOVER;
+
+	cmd->pdu_ptr = pdu;
+
+	switch (pdu->status) {
+	case ISCSI_PDU_NOT_RECEIVED:
+	case ISCSI_PDU_CRC_FAILED:
+	case ISCSI_PDU_TIMED_OUT:
+		break;
+	case ISCSI_PDU_RECEIVED_OK:
+		pr_err("Command ITT: 0x%08x received duplicate DataOUT for"
+			" Offset: %u, Length: %u\n", cmd->init_task_tag,
+				be32_to_cpu(hdr->offset), payload_length);
+		return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
+	default:
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
+{
+	struct iscsi_r2t *r2t;
+
+	if (cmd->unsolicited_data)
+		return 0;
+
+	r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
+	if (!r2t)
+		return -1;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	r2t->seq_complete = 1;
+	cmd->outstanding_r2ts--;
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return 0;
+}
+
+static int iscsit_dataout_update_datapduinorder_no(
+	struct iscsi_cmd *cmd,
+	u32 data_sn,
+	int f_bit)
+{
+	int ret = 0;
+	struct iscsi_pdu *pdu = cmd->pdu_ptr;
+
+	pdu->data_sn = data_sn;
+
+	switch (pdu->status) {
+	case ISCSI_PDU_NOT_RECEIVED:
+	case ISCSI_PDU_CRC_FAILED:
+	case ISCSI_PDU_TIMED_OUT:
+		pdu->status = ISCSI_PDU_RECEIVED_OK;
+		break;
+	default:
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	if (f_bit) {
+		ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
+		if (ret == DATAOUT_CANNOT_RECOVER)
+			return ret;
+	}
+
+	return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_post_crc_passed(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	int ret, send_r2t = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_seq *seq = NULL;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if (cmd->unsolicited_data) {
+		if ((cmd->first_burst_len + payload_length) ==
+		     conn->sess->sess_ops->FirstBurstLength) {
+			if (iscsit_dataout_update_r2t(cmd, be32_to_cpu(hdr->offset),
+					payload_length) < 0)
+				return DATAOUT_CANNOT_RECOVER;
+			send_r2t = 1;
+		}
+
+		if (!conn->sess->sess_ops->DataPDUInOrder) {
+			ret = iscsit_dataout_update_datapduinorder_no(cmd,
+				be32_to_cpu(hdr->datasn),
+				(hdr->flags & ISCSI_FLAG_CMD_FINAL));
+			if (ret == DATAOUT_CANNOT_RECOVER)
+				return ret;
+		}
+
+		cmd->first_burst_len += payload_length;
+
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			cmd->data_sn++;
+		} else {
+			seq = cmd->seq_ptr;
+			seq->data_sn++;
+			seq->offset += payload_length;
+		}
+
+		if (send_r2t) {
+			if (seq)
+				seq->status = DATAOUT_SEQUENCE_COMPLETE;
+			cmd->first_burst_len = 0;
+			cmd->unsolicited_data = 0;
+		}
+	} else {
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if ((cmd->next_burst_len + payload_length) ==
+			     conn->sess->sess_ops->MaxBurstLength) {
+				if (iscsit_dataout_update_r2t(cmd,
+						be32_to_cpu(hdr->offset),
+						payload_length) < 0)
+					return DATAOUT_CANNOT_RECOVER;
+				send_r2t = 1;
+			}
+
+			if (!conn->sess->sess_ops->DataPDUInOrder) {
+				ret = iscsit_dataout_update_datapduinorder_no(
+						cmd, be32_to_cpu(hdr->datasn),
+						(hdr->flags & ISCSI_FLAG_CMD_FINAL));
+				if (ret == DATAOUT_CANNOT_RECOVER)
+					return ret;
+			}
+
+			cmd->next_burst_len += payload_length;
+			cmd->data_sn++;
+
+			if (send_r2t)
+				cmd->next_burst_len = 0;
+		} else {
+			seq = cmd->seq_ptr;
+
+			if ((seq->next_burst_len + payload_length) ==
+			     seq->xfer_len) {
+				if (iscsit_dataout_update_r2t(cmd,
+						be32_to_cpu(hdr->offset),
+						payload_length) < 0)
+					return DATAOUT_CANNOT_RECOVER;
+				send_r2t = 1;
+			}
+
+			if (!conn->sess->sess_ops->DataPDUInOrder) {
+				ret = iscsit_dataout_update_datapduinorder_no(
+						cmd, be32_to_cpu(hdr->datasn),
+						(hdr->flags & ISCSI_FLAG_CMD_FINAL));
+				if (ret == DATAOUT_CANNOT_RECOVER)
+					return ret;
+			}
+
+			seq->data_sn++;
+			seq->offset += payload_length;
+			seq->next_burst_len += payload_length;
+
+			if (send_r2t) {
+				seq->next_burst_len = 0;
+				seq->status = DATAOUT_SEQUENCE_COMPLETE;
+			}
+		}
+	}
+
+	if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
+		cmd->data_sn = 0;
+
+	cmd->write_data_done += payload_length;
+
+	if (cmd->write_data_done == cmd->se_cmd.data_length)
+		return DATAOUT_SEND_TO_TRANSPORT;
+	else if (send_r2t)
+		return DATAOUT_SEND_R2T;
+	else
+		return DATAOUT_NORMAL;
+}
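+
+/*
+ *	Worked example of the R2T bookkeeping above (illustrative values): with
+ *	MaxBurstLength=65536 and 8192 byte DataOUT PDUs, the eighth PDU of a
+ *	solicited burst brings next_burst_len to 65536, so the outstanding R2T
+ *	is marked complete, DATAOUT_SEND_R2T is returned to solicit the next
+ *	burst, next_burst_len resets to 0 and, for DataSequenceInOrder=Yes,
+ *	the DataSN restarts at 0.
+ */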
+
+static int iscsit_dataout_post_crc_failed(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if (conn->sess->sess_ops->DataPDUInOrder)
+		goto recover;
+	/*
+	 * The rest of this function is only called when DataPDUInOrder=No.
+	 */
+	pdu = cmd->pdu_ptr;
+
+	switch (pdu->status) {
+	case ISCSI_PDU_NOT_RECEIVED:
+	case ISCSI_PDU_CRC_FAILED:
+	case ISCSI_PDU_TIMED_OUT:
+		pdu->status = ISCSI_PDU_CRC_FAILED;
+		break;
+	default:
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+recover:
+	return iscsit_recover_dataout_sequence(cmd, be32_to_cpu(hdr->offset),
+						payload_length);
+}
+
+/*
+ *	Called from iscsit_handle_data_out() before DataOUT Payload is received
+ *	and CRC computed.
+ */
+int iscsit_check_pre_dataout(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	int ret;
+	struct iscsi_conn *conn = cmd->conn;
+
+	ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
+	if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+	    (ret == DATAOUT_CANNOT_RECOVER))
+		return ret;
+
+	ret = iscsit_dataout_check_datasn(cmd, buf);
+	if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+	    (ret == DATAOUT_CANNOT_RECOVER))
+		return ret;
+
+	if (cmd->unsolicited_data) {
+		ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
+		if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+		    (ret == DATAOUT_CANNOT_RECOVER))
+			return ret;
+	} else {
+		ret = iscsit_dataout_check_sequence(cmd, buf);
+		if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+		    (ret == DATAOUT_CANNOT_RECOVER))
+			return ret;
+	}
+
+	return (conn->sess->sess_ops->DataPDUInOrder) ?
+		iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
+		iscsit_dataout_pre_datapduinorder_no(cmd, buf);
+}
+
+/*
+ *	Called from iscsit_handle_data_out() after DataOUT Payload is received
+ *	and CRC computed.
+ */
+int iscsit_check_post_dataout(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf,
+	u8 data_crc_failed)
+{
+	struct iscsi_conn *conn = cmd->conn;
+
+	cmd->dataout_timeout_retries = 0;
+
+	if (!data_crc_failed)
+		return iscsit_dataout_post_crc_passed(cmd, buf);
+
+	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+		pr_err("Unable to recover from DataOUT CRC"
+			" failure while ERL=0, closing session.\n");
+		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
+	return iscsit_dataout_post_crc_failed(cmd, buf);
+}
+
+static void iscsit_handle_time2retain_timeout(unsigned long data)
+{
+	struct iscsi_session *sess = (struct iscsi_session *) data;
+	struct iscsi_portal_group *tpg = sess->tpg;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&se_tpg->session_lock);
+		return;
+	}
+	if (atomic_read(&sess->session_reinstatement)) {
+		pr_err("Exiting Time2Retain handler because"
+				" session_reinstatement=1\n");
+		spin_unlock_bh(&se_tpg->session_lock);
+		return;
+	}
+	sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
+
+	pr_err("Time2Retain timer expired for SID: %u, cleaning up"
+			" iSCSI session.\n", sess->sid);
+	if (tiqn) {
+		spin_lock(&tiqn->sess_err_stats.lock);
+		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+			(void *)sess->sess_ops->InitiatorName);
+		tiqn->sess_err_stats.last_sess_failure_type =
+				ISCSI_SESS_ERR_CXN_TIMEOUT;
+		tiqn->sess_err_stats.cxn_timeout_errors++;
+		atomic_long_inc(&sess->conn_timeout_errors);
+		spin_unlock(&tiqn->sess_err_stats.lock);
+	}
+
+	spin_unlock_bh(&se_tpg->session_lock);
+	target_put_session(sess->se_sess);
+}
+
+void iscsit_start_time2retain_handler(struct iscsi_session *sess)
+{
+	int tpg_active;
+	/*
+	 * Only start the Time2Retain timer when the associated TPG is still
+	 * in an ACTIVE (i.e. not disabled or shutdown) state.
+	 */
+	spin_lock(&sess->tpg->tpg_state_lock);
+	tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
+	spin_unlock(&sess->tpg->tpg_state_lock);
+
+	if (!tpg_active)
+		return;
+
+	if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
+		return;
+
+	pr_debug("Starting Time2Retain timer for %u seconds on"
+		" SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
+
+	init_timer(&sess->time2retain_timer);
+	sess->time2retain_timer.expires =
+		(get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
+	sess->time2retain_timer.data = (unsigned long)sess;
+	sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
+	sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
+	sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&sess->time2retain_timer);
+}
+
+/*
+ *	Called with spin_lock_bh(&struct se_portal_group->session_lock) held
+ */
+int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
+{
+	struct iscsi_portal_group *tpg = sess->tpg;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+	if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
+		return -1;
+
+	if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
+		return 0;
+
+	sess->time2retain_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock(&se_tpg->session_lock);
+
+	del_timer_sync(&sess->time2retain_timer);
+
+	spin_lock(&se_tpg->session_lock);
+	sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
+	pr_debug("Stopped Time2Retain Timer for SID: %u\n",
+			sess->sid);
+	return 0;
+}
+
+void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->state_lock);
+	if (atomic_read(&conn->connection_exit)) {
+		spin_unlock_bh(&conn->state_lock);
+		goto sleep;
+	}
+
+	if (atomic_read(&conn->transport_failed)) {
+		spin_unlock_bh(&conn->state_lock);
+		goto sleep;
+	}
+	spin_unlock_bh(&conn->state_lock);
+
+	if (conn->tx_thread && conn->tx_thread_active)
+		send_sig(SIGINT, conn->tx_thread, 1);
+	if (conn->rx_thread && conn->rx_thread_active)
+		send_sig(SIGINT, conn->rx_thread, 1);
+
+sleep:
+	wait_for_completion(&conn->conn_wait_rcfr_comp);
+	complete(&conn->conn_post_wait_comp);
+}
+
+void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+{
+	spin_lock_bh(&conn->state_lock);
+	if (atomic_read(&conn->connection_exit)) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	if (atomic_read(&conn->transport_failed)) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	if (atomic_read(&conn->connection_reinstatement)) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	if (conn->tx_thread && conn->tx_thread_active)
+		send_sig(SIGINT, conn->tx_thread, 1);
+	if (conn->rx_thread && conn->rx_thread_active)
+		send_sig(SIGINT, conn->rx_thread, 1);
+
+	atomic_set(&conn->connection_reinstatement, 1);
+	if (!sleep) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	atomic_set(&conn->sleep_on_conn_wait_comp, 1);
+	spin_unlock_bh(&conn->state_lock);
+
+	wait_for_completion(&conn->conn_wait_comp);
+	complete(&conn->conn_post_wait_comp);
+}
+EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
+
+void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
+{
+	pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
+			" %u\n", sess->sid);
+
+	atomic_set(&sess->session_fall_back_to_erl0, 1);
+}
+
+static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+
+	if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
+	    !atomic_read(&sess->session_reinstatement) &&
+	    !atomic_read(&sess->session_fall_back_to_erl0)) {
+		iscsit_connection_recovery_transport_reset(conn);
+	} else {
+		pr_debug("Performing cleanup for failed iSCSI"
+			" Connection ID: %hu from %s\n", conn->cid,
+			sess->sess_ops->InitiatorName);
+		iscsit_close_connection(conn);
+	}
+}
+
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
+{
+	*conn_freed = false;
+
+	spin_lock_bh(&conn->state_lock);
+	if (atomic_read(&conn->connection_exit)) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+	atomic_set(&conn->connection_exit, 1);
+
+	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+		spin_unlock_bh(&conn->state_lock);
+		iscsit_close_connection(conn);
+		*conn_freed = true;
+		return;
+	}
+
+	if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+	conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+	spin_unlock_bh(&conn->state_lock);
+
+	iscsit_handle_connection_cleanup(conn);
+	*conn_freed = true;
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
new file mode 100644
index 0000000..fbc1d84
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_ERL0_H
+#define ISCSI_TARGET_ERL0_H
+
+extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
+extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
+extern void iscsit_start_time2retain_handler(struct iscsi_session *);
+extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
+extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
+extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
+
+#endif   /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
new file mode 100644
index 0000000..2e561de
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -0,0 +1,1294 @@
+/*******************************************************************************
+ * This file contains error recovery level one used by the iSCSI Target driver.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+#define OFFLOAD_BUF_SIZE	32768
+
+/*
+ *	Used to dump excess data payload for certain error recovery
+ *	situations.  Receives at most OFFLOAD_BUF_SIZE bytes per rx_data()
+ *	call.
+ *
+ *	dump_padding_digest denotes whether padding and data digests need
+ *	to be dumped.
+ */
+int iscsit_dump_data_payload(
+	struct iscsi_conn *conn,
+	u32 buf_len,
+	int dump_padding_digest)
+{
+	char *buf, pad_bytes[4];
+	int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
+	u32 length, padding, offset = 0, size;
+	struct kvec iov;
+
+	if (conn->sess->sess_ops->RDMAExtensions)
+		return 0;
+
+	length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+
+	buf = kzalloc(length, GFP_ATOMIC);
+	if (!buf) {
+		pr_err("Unable to allocate %u bytes for offload"
+				" buffer.\n", length);
+		return -1;
+	}
+	memset(&iov, 0, sizeof(struct kvec));
+
+	while (offset < buf_len) {
+		size = ((offset + length) > buf_len) ?
+			(buf_len - offset) : length;
+
+		iov.iov_len = size;
+		iov.iov_base = buf;
+
+		rx_got = rx_data(conn, &iov, 1, size);
+		if (rx_got != size) {
+			ret = DATAOUT_CANNOT_RECOVER;
+			goto out;
+		}
+
+		offset += size;
+	}
+
+	if (!dump_padding_digest)
+		goto out;
+
+	padding = ((-buf_len) & 3);
+	if (padding != 0) {
+		iov.iov_len = padding;
+		iov.iov_base = pad_bytes;
+
+		rx_got = rx_data(conn, &iov, 1, padding);
+		if (rx_got != padding) {
+			ret = DATAOUT_CANNOT_RECOVER;
+			goto out;
+		}
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		u32 data_crc;
+
+		iov.iov_len = ISCSI_CRC_LEN;
+		iov.iov_base = &data_crc;
+
+		rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+		if (rx_got != ISCSI_CRC_LEN) {
+			ret = DATAOUT_CANNOT_RECOVER;
+			goto out;
+		}
+	}
+
+out:
+	kfree(buf);
+	return ret;
+}
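+
+/*
+ *	Padding example for the above: padding = (-buf_len) & 3 rounds the
+ *	dumped payload up to a 4 byte boundary, e.g. buf_len=2050 yields
+ *	padding=2, so 2052 bytes (plus a 4 byte DataDigest, if negotiated)
+ *	are drained from the socket in total.
+ */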
+
+/*
+ *	Used for retransmitting R2Ts from a R2T SNACK request.
+ */
+static int iscsit_send_recovery_r2t_for_snack(
+	struct iscsi_cmd *cmd,
+	struct iscsi_r2t *r2t)
+{
+	/*
+	 * If the struct iscsi_r2t has not been sent yet, we can safely
+	 * ignore retransmission of the R2TSN in question.
+	 */
+	spin_lock_bh(&cmd->r2t_lock);
+	if (!r2t->sent_r2t) {
+		spin_unlock_bh(&cmd->r2t_lock);
+		return 0;
+	}
+	r2t->sent_r2t = 0;
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+	return 0;
+}
+
+static int iscsit_handle_r2t_snack(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf,
+	u32 begrun,
+	u32 runlength)
+{
+	u32 last_r2tsn;
+	struct iscsi_r2t *r2t;
+
+	/*
+	 * Make sure the initiator is not requesting retransmission
+	 * of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
+	 */
+	if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+	    (begrun <= cmd->acked_data_sn)) {
+		pr_err("ITT: 0x%08x, R2T SNACK requesting"
+			" retransmission of R2TSN: 0x%08x to 0x%08x but already"
+			" acked to R2TSN: 0x%08x by TMR TASK_REASSIGN,"
+			" protocol error.\n", cmd->init_task_tag, begrun,
+			(begrun + runlength), cmd->acked_data_sn);
+
+		return iscsit_reject_cmd(cmd,
+				ISCSI_REASON_PROTOCOL_ERROR, buf);
+	}
+
+	if (runlength) {
+		if ((begrun + runlength) > cmd->r2t_sn) {
+			pr_err("Command ITT: 0x%08x received R2T SNACK"
+			" with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
+			" current R2TSN: 0x%08x, protocol error.\n",
+			cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
+			return iscsit_reject_cmd(cmd,
+					ISCSI_REASON_BOOKMARK_INVALID, buf);
+		}
+		last_r2tsn = (begrun + runlength);
+	} else {
+		last_r2tsn = cmd->r2t_sn;
+	}
+
+	while (begrun < last_r2tsn) {
+		r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
+		if (!r2t)
+			return -1;
+		if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
+			return -1;
+
+		begrun++;
+	}
+
+	return 0;
+}
+
+/*
+ *	Generates Offsets and NextBurstLength based on Begrun and Runlength
+ *	carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ *	For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
+ *
+ *	FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain_req *dr)
+{
+	u32 data_sn = 0, data_sn_count = 0;
+	u32 pdu_start = 0, seq_no = 0;
+	u32 begrun = dr->begrun;
+	struct iscsi_conn *conn = cmd->conn;
+
+	while (begrun > data_sn++) {
+		data_sn_count++;
+		if ((dr->next_burst_len +
+		     conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength) {
+			dr->read_data_done +=
+				conn->conn_ops->MaxRecvDataSegmentLength;
+			dr->next_burst_len +=
+				conn->conn_ops->MaxRecvDataSegmentLength;
+		} else {
+			dr->read_data_done +=
+				(conn->sess->sess_ops->MaxBurstLength -
+				 dr->next_burst_len);
+			dr->next_burst_len = 0;
+			pdu_start += data_sn_count;
+			data_sn_count = 0;
+			seq_no++;
+		}
+	}
+
+	if (!conn->sess->sess_ops->DataPDUInOrder) {
+		cmd->seq_no = seq_no;
+		cmd->pdu_start = pdu_start;
+		cmd->pdu_send_order = data_sn_count;
+	}
+
+	return 0;
+}
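+
+/*
+ *	Worked example for the above (illustrative values): with
+ *	MaxRecvDataSegmentLength=8192, MaxBurstLength=32768 and BegRun=10,
+ *	the loop walks DataSNs 0-9 and resumes with read_data_done=81920
+ *	(10 x 8192) and next_burst_len=16384, i.e. two PDUs into the third
+ *	DataIN sequence.
+ */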
+
+/*
+ *	Generates Offsets and NextBurstLength based on Begrun and Runlength
+ *	carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ *	For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
+ *
+ *	FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain_req *dr)
+{
+	int found_seq = 0, i;
+	u32 data_sn, read_data_done = 0, seq_send_order = 0;
+	u32 begrun = dr->begrun;
+	u32 runlength = dr->runlength;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_seq *first_seq = NULL, *seq = NULL;
+
+	if (!cmd->seq_list) {
+		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+		return -1;
+	}
+
+	/*
+	 * Calculate read_data_done for all sequences containing a
+	 * first_datasn and last_datasn less than the BegRun.
+	 *
+	 * Locate the struct iscsi_seq the BegRun lies within and calculate
+	 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
+	 *
+	 * Also use struct iscsi_seq->seq_send_order to determine where to start.
+	 */
+	for (i = 0; i < cmd->seq_count; i++) {
+		seq = &cmd->seq_list[i];
+
+		if (!seq->seq_send_order)
+			first_seq = seq;
+
+		/*
+		 * No data has been transferred for this DataIN sequence, so the
+		 * seq->first_datasn and seq->last_datasn have not been set.
+		 */
+		if (!seq->sent) {
+			pr_err("Ignoring non-sent sequence 0x%08x ->"
+				" 0x%08x\n", seq->first_datasn,
+				seq->last_datasn);
+			continue;
+		}
+
+		/*
+		 * This DataIN sequence precedes the received BegRun; add the
+		 * total xfer_len of the sequence to read_data_done and reset
+		 * seq->pdu_send_order.
+		 */
+		if ((seq->first_datasn < begrun) &&
+				(seq->last_datasn < begrun)) {
+			pr_err("Pre BegRun sequence 0x%08x ->"
+				" 0x%08x\n", seq->first_datasn,
+				seq->last_datasn);
+
+			read_data_done += cmd->seq_list[i].xfer_len;
+			seq->next_burst_len = seq->pdu_send_order = 0;
+			continue;
+		}
+
+		/*
+		 * The BegRun lies within this DataIN sequence.
+		 */
+		if ((seq->first_datasn <= begrun) &&
+				(seq->last_datasn >= begrun)) {
+			pr_err("Found sequence begrun: 0x%08x in"
+				" 0x%08x -> 0x%08x\n", begrun,
+				seq->first_datasn, seq->last_datasn);
+
+			seq_send_order = seq->seq_send_order;
+			data_sn = seq->first_datasn;
+			seq->next_burst_len = seq->pdu_send_order = 0;
+			found_seq = 1;
+
+			/*
+			 * For DataPDUInOrder=Yes, while the first DataSN of
+			 * the sequence is less than the received BegRun, add
+			 * the MaxRecvDataSegmentLength to read_data_done and
+			 * to the sequence's next_burst_len.
+			 *
+			 * For DataPDUInOrder=No, while the first DataSN of the
+			 * sequence is less than the received BegRun, find the
+			 * struct iscsi_pdu of the DataSN in question and add the
+			 * MaxRecvDataSegmentLength to read_data_done and to the
+			 * sequence's next_burst_len.
+			 */
+			if (conn->sess->sess_ops->DataPDUInOrder) {
+				while (data_sn < begrun) {
+					seq->pdu_send_order++;
+					read_data_done +=
+						conn->conn_ops->MaxRecvDataSegmentLength;
+					seq->next_burst_len +=
+						conn->conn_ops->MaxRecvDataSegmentLength;
+					data_sn++;
+				}
+			} else {
+				int j;
+				struct iscsi_pdu *pdu;
+
+				while (data_sn < begrun) {
+					seq->pdu_send_order++;
+
+					for (j = 0; j < seq->pdu_count; j++) {
+						pdu = &cmd->pdu_list[
+							seq->pdu_start + j];
+						if (pdu->data_sn == data_sn) {
+							read_data_done +=
+								pdu->length;
+							seq->next_burst_len +=
+								pdu->length;
+						}
+					}
+					data_sn++;
+				}
+			}
+			continue;
+		}
+
+		/*
+		 * This DataIN sequence lies beyond the received BegRun;
+		 * reset seq->pdu_send_order and continue.
+		 */
+		if ((seq->first_datasn > begrun) ||
+				(seq->last_datasn > begrun)) {
+			pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
+					seq->first_datasn, seq->last_datasn);
+
+			seq->next_burst_len = seq->pdu_send_order = 0;
+			continue;
+		}
+	}
+
+	if (!found_seq) {
+		if (!begrun) {
+			if (!first_seq) {
+				pr_err("ITT: 0x%08x, Begrun: 0x%08x"
+					" but first_seq is NULL\n",
+					cmd->init_task_tag, begrun);
+				return -1;
+			}
+			seq_send_order = first_seq->seq_send_order;
+			seq->next_burst_len = seq->pdu_send_order = 0;
+			goto done;
+		}
+
+		pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
+			" BegRun: 0x%08x, RunLength: 0x%08x while"
+			" DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
+				cmd->init_task_tag, begrun, runlength,
+			(conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
+		return -1;
+	}
+
+done:
+	dr->read_data_done = read_data_done;
+	dr->seq_send_order = seq_send_order;
+
+	return 0;
+}
+
+static int iscsit_handle_recovery_datain(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf,
+	u32 begrun,
+	u32 runlength)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
+		pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
+				cmd->init_task_tag);
+		return 0;
+	}
+
+	/*
+	 * Make sure the initiator is not requesting retransmission
+	 * of DataSNs already acknowledged by a Data ACK SNACK.
+	 */
+	if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+	    (begrun <= cmd->acked_data_sn)) {
+		pr_err("ITT: 0x%08x, Data SNACK requesting"
+			" retransmission of DataSN: 0x%08x to 0x%08x but"
+			" already acked to DataSN: 0x%08x by Data ACK SNACK,"
+			" protocol error.\n", cmd->init_task_tag, begrun,
+			(begrun + runlength), cmd->acked_data_sn);
+
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
+	}
+
+	/*
+	 * Make sure BegRun and RunLength in the Data SNACK are sane.
+	 * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
+	 */
+	if ((begrun + runlength) > (cmd->data_sn - 1)) {
+		pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
+			": 0x%08x greater than maximum DataSN: 0x%08x.\n",
+				begrun, runlength, (cmd->data_sn - 1));
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID,
+					 buf);
+	}
+
+	dr = iscsit_allocate_datain_req();
+	if (!dr)
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					 buf);
+
+	dr->data_sn = dr->begrun = begrun;
+	dr->runlength = runlength;
+	dr->generate_recovery_values = 1;
+	dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;
+
+	iscsit_attach_datain_req(cmd, dr);
+
+	cmd->i_state = ISTATE_SEND_DATAIN;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return 0;
+}
+
+int iscsit_handle_recovery_datain_or_r2t(
+	struct iscsi_conn *conn,
+	unsigned char *buf,
+	itt_t init_task_tag,
+	u32 targ_xfer_tag,
+	u32 begrun,
+	u32 runlength)
+{
+	struct iscsi_cmd *cmd;
+
+	cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
+	if (!cmd)
+		return 0;
+
+	/*
+	 * FIXME: This will not work for bidi commands.
+	 */
+	switch (cmd->data_direction) {
+	case DMA_TO_DEVICE:
+		return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
+	case DMA_FROM_DEVICE:
+		return iscsit_handle_recovery_datain(cmd, buf, begrun,
+				runlength);
+	default:
+		pr_err("Unknown cmd->data_direction: 0x%02x\n",
+				cmd->data_direction);
+		return -1;
+	}
+}
+
+/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
+int iscsit_handle_status_snack(
+	struct iscsi_conn *conn,
+	itt_t init_task_tag,
+	u32 targ_xfer_tag,
+	u32 begrun,
+	u32 runlength)
+{
+	struct iscsi_cmd *cmd = NULL;
+	u32 last_statsn;
+	int found_cmd;
+
+	if (!begrun) {
+		begrun = conn->exp_statsn;
+	} else if (conn->exp_statsn > begrun) {
+		pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
+			" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
+			" %hu.\n", begrun, runlength, conn->exp_statsn,
+			conn->cid);
+		return 0;
+	}
+
+	last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);
+
+	while (begrun < last_statsn) {
+		found_cmd = 0;
+
+		spin_lock_bh(&conn->cmd_lock);
+		list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+			if (cmd->stat_sn == begrun) {
+				found_cmd = 1;
+				break;
+			}
+		}
+		spin_unlock_bh(&conn->cmd_lock);
+
+		if (!found_cmd) {
+			pr_err("Unable to find StatSN: 0x%08x for"
+				" a Status SNACK, assuming this was a"
+				" proactive SNACK for an untransmitted"
+				" StatSN, ignoring.\n", begrun);
+			begrun++;
+			continue;
+		}
+
+		spin_lock_bh(&cmd->istate_lock);
+		if (cmd->i_state == ISTATE_SEND_DATAIN) {
+			spin_unlock_bh(&cmd->istate_lock);
+			pr_err("Ignoring Status SNACK for BegRun:"
+				" 0x%08x, RunLength: 0x%08x, assuming this was"
+				" a proactive SNACK for an untransmitted"
+				" StatSN\n", begrun, runlength);
+			begrun++;
+			continue;
+		}
+		spin_unlock_bh(&cmd->istate_lock);
+
+		cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		begrun++;
+	}
+
+	return 0;
+}
+
+int iscsit_handle_data_ack(
+	struct iscsi_conn *conn,
+	u32 targ_xfer_tag,
+	u32 begrun,
+	u32 runlength)
+{
+	struct iscsi_cmd *cmd = NULL;
+
+	cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
+	if (!cmd) {
+		pr_err("Data ACK SNACK for TTT: 0x%08x is"
+			" invalid.\n", targ_xfer_tag);
+		return -1;
+	}
+
+	if (begrun <= cmd->acked_data_sn) {
+		pr_err("ITT: 0x%08x Data ACK SNACK BegRun: 0x%08x is"
+			" less than the already acked DataSN: 0x%08x.\n",
+			cmd->init_task_tag, begrun, cmd->acked_data_sn);
+		return -1;
+	}
+
+	/*
+	 * For Data ACK SNACK, BegRun is the next expected DataSN.
+	 * (see iSCSI v19: 10.16.6)
+	 */
+	cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+	cmd->acked_data_sn = (begrun - 1);
+
+	pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
+		" updated acked DataSN to 0x%08x.\n",
+			cmd->init_task_tag, cmd->acked_data_sn);
+
+	return 0;
+}
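+
+/*
+ *	Example: a Data ACK SNACK with BegRun=8 acknowledges DataSNs 0-7, so
+ *	acked_data_sn becomes 7 and any later SNACK requesting retransmission
+ *	of a DataSN at or below 7 is rejected as a protocol error.
+ */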
+
+static int iscsit_send_recovery_r2t(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 xfer_len)
+{
+	int ret;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return ret;
+}
+
+int iscsit_dataout_datapduinorder_no_fbit(
+	struct iscsi_cmd *cmd,
+	struct iscsi_pdu *pdu)
+{
+	int i, send_recovery_r2t = 0, recovery = 0;
+	u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *first_pdu = NULL;
+
+	/*
+	 * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU
+	 * count of the DataOUT sequence.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		for (i = 0; i < cmd->pdu_count; i++) {
+			if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
+				if (!first_pdu)
+					first_pdu = &cmd->pdu_list[i];
+				xfer_len += cmd->pdu_list[i].length;
+				pdu_count++;
+			} else if (pdu_count)
+				break;
+		}
+	} else {
+		struct iscsi_seq *seq = cmd->seq_ptr;
+
+		first_pdu = &cmd->pdu_list[seq->pdu_start];
+		pdu_count = seq->pdu_count;
+	}
+
+	if (!first_pdu || !pdu_count)
+		return DATAOUT_CANNOT_RECOVER;
+
+	/*
+	 * Loop through the ending DataOUT Sequence checking each struct iscsi_pdu.
+	 * The following logic batches adjacent PDUs that were not received into
+	 * a single recovery R2T.
+	 */
+	for (i = 0; i < pdu_count; i++) {
+		if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
+			if (!send_recovery_r2t)
+				continue;
+
+			if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+				return DATAOUT_CANNOT_RECOVER;
+
+			send_recovery_r2t = length = offset = 0;
+			continue;
+		}
+		/*
+		 * Set recovery = 1 for any missing, CRC failed, or timed
+		 * out PDUs to let the DataOUT logic know that this sequence
+		 * has not been completed yet.
+		 *
+		 * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
+		 * We assume if the PDU either failed CRC or timed out
+		 * that a Recovery R2T has already been sent.
+		 */
+		recovery = 1;
+
+		if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
+			continue;
+
+		if (!offset)
+			offset = first_pdu[i].offset;
+		length += first_pdu[i].length;
+
+		send_recovery_r2t = 1;
+	}
+
+	if (send_recovery_r2t)
+		if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+			return DATAOUT_CANNOT_RECOVER;
+
+	return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
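+
+/*
+ *	Batching example for the above (illustrative statuses): for a five PDU
+ *	sequence with statuses OK, NOT_RECEIVED, NOT_RECEIVED, OK, CRC_FAILED,
+ *	a single recovery R2T is sent covering the two missing PDUs, no new
+ *	R2T is sent for the CRC failed PDU (one is assumed to already be
+ *	outstanding), and DATAOUT_WITHIN_COMMAND_RECOVERY is returned.
+ */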
+
+static int iscsit_recalculate_dataout_values(
+	struct iscsi_cmd *cmd,
+	u32 pdu_offset,
+	u32 pdu_length,
+	u32 *r2t_offset,
+	u32 *r2t_length)
+{
+	int i;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu = NULL;
+
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		cmd->data_sn = 0;
+
+		if (conn->sess->sess_ops->DataPDUInOrder) {
+			*r2t_offset = cmd->write_data_done;
+			*r2t_length = (cmd->seq_end_offset -
+					cmd->write_data_done);
+			return 0;
+		}
+
+		*r2t_offset = cmd->seq_start_offset;
+		*r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);
+
+		for (i = 0; i < cmd->pdu_count; i++) {
+			pdu = &cmd->pdu_list[i];
+
+			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+				continue;
+
+			if ((pdu->offset >= cmd->seq_start_offset) &&
+			   ((pdu->offset + pdu->length) <=
+			     cmd->seq_end_offset)) {
+				if (!cmd->unsolicited_data)
+					cmd->next_burst_len -= pdu->length;
+				else
+					cmd->first_burst_len -= pdu->length;
+
+				cmd->write_data_done -= pdu->length;
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+	} else {
+		struct iscsi_seq *seq = NULL;
+
+		seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
+		if (!seq)
+			return -1;
+
+		*r2t_offset = seq->orig_offset;
+		*r2t_length = seq->xfer_len;
+
+		cmd->write_data_done -= (seq->offset - seq->orig_offset);
+		if (cmd->immediate_data)
+			cmd->first_burst_len = cmd->write_data_done;
+
+		seq->data_sn = 0;
+		seq->offset = seq->orig_offset;
+		seq->next_burst_len = 0;
+		seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+		if (conn->sess->sess_ops->DataPDUInOrder)
+			return 0;
+
+		for (i = 0; i < seq->pdu_count; i++) {
+			pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+				continue;
+
+			pdu->status = ISCSI_PDU_NOT_RECEIVED;
+		}
+	}
+
+	return 0;
+}
+
+int iscsit_recover_dataout_sequence(
+	struct iscsi_cmd *cmd,
+	u32 pdu_offset,
+	u32 pdu_length)
+{
+	u32 r2t_length = 0, r2t_offset = 0;
+
+	spin_lock_bh(&cmd->istate_lock);
+	cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+	spin_unlock_bh(&cmd->istate_lock);
+
+	if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+			&r2t_offset, &r2t_length) < 0)
+		return DATAOUT_CANNOT_RECOVER;
+
+	iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
+
+	return DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
+
+static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
+{
+	struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
+
+	ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
+	if (!ooo_cmdsn) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_ooo_cmdsn.\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
+
+	return ooo_cmdsn;
+}
+
+/*
+ *	Called with sess->cmdsn_mutex held.
+ */
+static int iscsit_attach_ooo_cmdsn(
+	struct iscsi_session *sess,
+	struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+	struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
+	/*
+	 * We attach the struct iscsi_ooo_cmdsn entry to the out of order
+	 * list in increasing CmdSN order.
+	 * This allows iscsi_execute_ooo_cmdsns() to detect any
+	 * additional CmdSN holes while performing delayed execution.
+	 */
+	if (list_empty(&sess->sess_ooo_cmdsn_list))
+		list_add_tail(&ooo_cmdsn->ooo_list,
+				&sess->sess_ooo_cmdsn_list);
+	else {
+		ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+				typeof(*ooo_tail), ooo_list);
+		/*
+		 * CmdSN is greater than the tail of the list.
+		 */
+		if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
+			list_add_tail(&ooo_cmdsn->ooo_list,
+					&sess->sess_ooo_cmdsn_list);
+		else {
+			/*
+			 * CmdSN is either lower than the head, or somewhere
+			 * in the middle.
+			 */
+			list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
+						ooo_list) {
+				if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
+					continue;
+
+				/* Insert before this entry */
+				list_add(&ooo_cmdsn->ooo_list,
+					ooo_tmp->ooo_list.prev);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
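+
+/*
+ *	Ordering example for the above: with an out of order list holding
+ *	CmdSNs 3, 4 and 6, attaching CmdSN 5 walks past 3 and 4 and inserts
+ *	before 6, keeping the list sorted so iscsit_execute_ooo_cmdsns() can
+ *	replay 3, 4, 5, 6 once the hole at ExpCmdSN closes.
+ */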
+
+/*
+ *	Removes a struct iscsi_ooo_cmdsn from a session's list,
+ *	called with struct iscsi_session->cmdsn_mutex held.
+ */
+void iscsit_remove_ooo_cmdsn(
+	struct iscsi_session *sess,
+	struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+	list_del(&ooo_cmdsn->ooo_list);
+	kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+}
+
+void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+	struct iscsi_ooo_cmdsn *ooo_cmdsn;
+	struct iscsi_session *sess = conn->sess;
+
+	mutex_lock(&sess->cmdsn_mutex);
+	list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
+		if (ooo_cmdsn->cid != conn->cid)
+			continue;
+
+		ooo_cmdsn->cmd = NULL;
+	}
+	mutex_unlock(&sess->cmdsn_mutex);
+}
+
+/*
+ *	Called with sess->cmdsn_mutex held.
+ */
+int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
+{
+	int ooo_count = 0;
+	struct iscsi_cmd *cmd = NULL;
+	struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+	list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+				&sess->sess_ooo_cmdsn_list, ooo_list) {
+		if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
+			continue;
+
+		if (!ooo_cmdsn->cmd) {
+			sess->exp_cmd_sn++;
+			iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+			continue;
+		}
+
+		cmd = ooo_cmdsn->cmd;
+		cmd->i_state = cmd->deferred_i_state;
+		ooo_count++;
+		sess->exp_cmd_sn++;
+		pr_debug("Executing out of order CmdSN: 0x%08x,"
+			" incremented ExpCmdSN to 0x%08x.\n",
+			cmd->cmd_sn, sess->exp_cmd_sn);
+
+		iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+
+		if (iscsit_execute_cmd(cmd, 1) < 0)
+			return -1;
+
+		continue;
+	}
+
+	return ooo_count;
+}
+
+/*
+ *	Called either:
+ *
+ *	1. With sess->cmdsn_mutex held from iscsi_execute_ooo_cmdsns()
+ *	or iscsi_check_received_cmdsn().
+ *	2. With no locks held directly from iscsi_handle_XXX_pdu() functions
+ *	for immediate commands.
+ */
+int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct iscsi_conn *conn = cmd->conn;
+	int lr = 0;
+
+	spin_lock_bh(&cmd->istate_lock);
+	if (ooo)
+		cmd->cmd_flags &= ~ICF_OOO_CMDSN;
+
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_SCSI_CMD:
+		/*
+		 * Go ahead and send the CHECK_CONDITION status for
+		 * any SCSI CDB exceptions that may have occurred.
+		 */
+		if (cmd->sense_reason) {
+			if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
+				cmd->i_state = ISTATE_SEND_STATUS;
+				spin_unlock_bh(&cmd->istate_lock);
+				iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+						cmd->i_state);
+				return 0;
+			}
+			spin_unlock_bh(&cmd->istate_lock);
+			/*
+			 * Determine if delayed TASK_ABORTED status for WRITEs
+			 * should be sent now if no unsolicited data out
+			 * payloads are expected, or if the delayed status
+			 * should be sent after unsolicited data out with
+			 * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
+			 */
+			if (transport_check_aborted_status(se_cmd,
+					(cmd->unsolicited_data == 0)) != 0)
+				return 0;
+			/*
+			 * Otherwise send CHECK_CONDITION and sense for
+			 * exception
+			 */
+			return transport_send_check_condition_and_sense(se_cmd,
+					cmd->sense_reason, 0);
+		}
+		/*
+		 * Special case for delayed CmdSN with Immediate
+		 * Data and/or Unsolicited Data Out attached.
+		 */
+		if (cmd->immediate_data) {
+			if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+				spin_unlock_bh(&cmd->istate_lock);
+				target_execute_cmd(&cmd->se_cmd);
+				return 0;
+			}
+			spin_unlock_bh(&cmd->istate_lock);
+
+			if (!(cmd->cmd_flags &
+					ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+				/*
+				 * Send the delayed TASK_ABORTED status for
+				 * WRITEs if no more unsolicited data is
+				 * expected.
+				 */
+				if (transport_check_aborted_status(se_cmd, 1)
+						!= 0)
+					return 0;
+
+				iscsit_set_dataout_sequence_values(cmd);
+				conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
+			}
+			return 0;
+		}
+		/*
+		 * The default handler.
+		 */
+		spin_unlock_bh(&cmd->istate_lock);
+
+		if ((cmd->data_direction == DMA_TO_DEVICE) &&
+		    !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+			/*
+			 * Send the delayed TASK_ABORTED status for WRITEs if
+			 * no more unsolicited data is expected.
+			 */
+			if (transport_check_aborted_status(se_cmd, 1) != 0)
+				return 0;
+
+			iscsit_set_unsoliticed_dataout(cmd);
+		}
+		return transport_handle_cdb_direct(&cmd->se_cmd);
+
+	case ISCSI_OP_NOOP_OUT:
+	case ISCSI_OP_TEXT:
+		spin_unlock_bh(&cmd->istate_lock);
+		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		if (cmd->se_cmd.se_tmr_req->response) {
+			spin_unlock_bh(&cmd->istate_lock);
+			iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+					cmd->i_state);
+			return 0;
+		}
+		spin_unlock_bh(&cmd->istate_lock);
+
+		return transport_generic_handle_tmr(&cmd->se_cmd);
+	case ISCSI_OP_LOGOUT:
+		spin_unlock_bh(&cmd->istate_lock);
+		switch (cmd->logout_reason) {
+		case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+			lr = iscsit_logout_closesession(cmd, cmd->conn);
+			break;
+		case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+			lr = iscsit_logout_closeconnection(cmd, cmd->conn);
+			break;
+		case ISCSI_LOGOUT_REASON_RECOVERY:
+			lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
+			break;
+		default:
+			pr_err("Unknown iSCSI Logout Request Code:"
+				" 0x%02x\n", cmd->logout_reason);
+			return -1;
+		}
+
+		return lr;
+	default:
+		spin_unlock_bh(&cmd->istate_lock);
+		pr_err("Cannot perform out of order execution for"
+		" unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
+		return -1;
+	}
+
+	return 0;
+}
+
+void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
+{
+	struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+	mutex_lock(&sess->cmdsn_mutex);
+	list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+			&sess->sess_ooo_cmdsn_list, ooo_list) {
+
+		list_del(&ooo_cmdsn->ooo_list);
+		kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+	}
+	mutex_unlock(&sess->cmdsn_mutex);
+}
+
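+/*
+ *	For example (illustrative values): with ExpCmdSN 10 and an empty
+ *	out-of-order list, a PDU arriving with CmdSN 13 is deferred with
+ *	batch_count = 13 - 10 = 3, covering the three missing CmdSNs.  If
+ *	CmdSN 14 then arrives directly behind 13, it is attached with
+ *	batch_count = 1.
+ */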
+int iscsit_handle_ooo_cmdsn(
+	struct iscsi_session *sess,
+	struct iscsi_cmd *cmd,
+	u32 cmdsn)
+{
+	int batch = 0;
+	struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
+
+	cmd->deferred_i_state		= cmd->i_state;
+	cmd->i_state			= ISTATE_DEFERRED_CMD;
+	cmd->cmd_flags			|= ICF_OOO_CMDSN;
+
+	if (list_empty(&sess->sess_ooo_cmdsn_list))
+		batch = 1;
+	else {
+		ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+				typeof(*ooo_tail), ooo_list);
+		if (ooo_tail->cmdsn != (cmdsn - 1))
+			batch = 1;
+	}
+
+	ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
+	if (!ooo_cmdsn)
+		return -ENOMEM;
+
+	ooo_cmdsn->cmd			= cmd;
+	ooo_cmdsn->batch_count		= (batch) ?
+					  (cmdsn - sess->exp_cmd_sn) : 1;
+	ooo_cmdsn->cid			= cmd->conn->cid;
+	ooo_cmdsn->exp_cmdsn		= sess->exp_cmd_sn;
+	ooo_cmdsn->cmdsn		= cmdsn;
+
+	if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
+		kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
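+/*
+ *	Determine the offset/length window to re-request after a DataOUT
+ *	timeout.  For unsolicited data this is the first burst, i.e.
+ *	min(FirstBurstLength, data_length); for example (illustrative
+ *	values), FirstBurstLength 65536 and a 4096-byte WRITE yield offset 0
+ *	and length 4096.  Otherwise the first sent but incomplete R2T
+ *	supplies the window.
+ */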
+static int iscsit_set_dataout_timeout_values(
+	struct iscsi_cmd *cmd,
+	u32 *offset,
+	u32 *length)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_r2t *r2t;
+
+	if (cmd->unsolicited_data) {
+		*offset = 0;
+		*length = (conn->sess->sess_ops->FirstBurstLength >
+			   cmd->se_cmd.data_length) ?
+			   cmd->se_cmd.data_length :
+			   conn->sess->sess_ops->FirstBurstLength;
+		return 0;
+	}
+
+	spin_lock_bh(&cmd->r2t_lock);
+	if (list_empty(&cmd->cmd_r2t_list)) {
+		pr_err("cmd->cmd_r2t_list is empty!\n");
+		spin_unlock_bh(&cmd->r2t_lock);
+		return -1;
+	}
+
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
+			*offset = r2t->offset;
+			*length = r2t->xfer_len;
+			spin_unlock_bh(&cmd->r2t_lock);
+			return 0;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	pr_err("Unable to locate any incomplete DataOUT"
+		" sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
+
+	return -1;
+}
+
+/*
+ *	NOTE: Called from interrupt (timer) context.
+ */
+static void iscsit_handle_dataout_timeout(unsigned long data)
+{
+	u32 pdu_length = 0, pdu_offset = 0;
+	u32 r2t_length = 0, r2t_offset = 0;
+	struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_session *sess = NULL;
+	struct iscsi_node_attrib *na;
+
+	iscsit_inc_conn_usage_count(conn);
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&cmd->dataout_timeout_lock);
+		iscsit_dec_conn_usage_count(conn);
+		return;
+	}
+	cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+	sess = conn->sess;
+	na = iscsit_tpg_get_node_attrib(sess);
+
+	if (!sess->sess_ops->ErrorRecoveryLevel) {
+		pr_debug("Unable to recover from DataOut timeout while"
+			" in ERL=0.\n");
+		goto failure;
+	}
+
+	if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
+		pr_debug("Command ITT: 0x%08x exceeded max retries"
+			" for DataOUT timeout %u, closing iSCSI connection.\n",
+			cmd->init_task_tag, na->dataout_timeout_retries);
+		goto failure;
+	}
+
+	cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+
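+	/*
+	 * Recompute the window to recover.  For example (illustrative
+	 * values): with DataSequenceInOrder=Yes, DataPDUInOrder=Yes,
+	 * write_data_done 32768, MaxBurstLength 65536, next_burst_len
+	 * 16384 and data_length 262144, recovery restarts at offset
+	 * 32768 for 65536 - 16384 = 49152 bytes.
+	 */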
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		if (conn->sess->sess_ops->DataPDUInOrder) {
+			pdu_offset = cmd->write_data_done;
+			if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
+			     cmd->next_burst_len)) > cmd->se_cmd.data_length)
+				pdu_length = (cmd->se_cmd.data_length -
+					cmd->write_data_done);
+			else
+				pdu_length = (conn->sess->sess_ops->MaxBurstLength -
+						cmd->next_burst_len);
+		} else {
+			pdu_offset = cmd->seq_start_offset;
+			pdu_length = (cmd->seq_end_offset -
+				cmd->seq_start_offset);
+		}
+	} else {
+		if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
+				&pdu_length) < 0)
+			goto failure;
+	}
+
+	if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+			&r2t_offset, &r2t_length) < 0)
+		goto failure;
+
+	pr_debug("Command ITT: 0x%08x timed out waiting for"
+		" completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
+		cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
+		"", r2t_offset, r2t_length);
+
+	if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
+		goto failure;
+
+	iscsit_start_dataout_timer(cmd, conn);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+	iscsit_dec_conn_usage_count(conn);
+
+	return;
+
+failure:
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+	iscsit_cause_connection_reinstatement(conn, 0);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&cmd->dataout_timeout_lock);
+		return;
+	}
+
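+	/* na->dataout_timeout is in seconds; scale by HZ to get jiffies. */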
+	mod_timer(&cmd->dataout_timer,
+		(get_jiffies_64() + na->dataout_timeout * HZ));
+	pr_debug("Updated DataOUT timer for ITT: 0x%08x",
+			cmd->init_task_tag);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+
+/*
+ *	Called with cmd->dataout_timeout_lock held.
+ */
+void iscsit_start_dataout_timer(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
+		return;
+
+	pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
+		" CID: %hu.\n", cmd->init_task_tag, conn->cid);
+
+	init_timer(&cmd->dataout_timer);
+	cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
+	cmd->dataout_timer.data = (unsigned long)cmd;
+	cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
+	cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
+	cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&cmd->dataout_timer);
+}
+
+void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
+{
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&cmd->dataout_timeout_lock);
+		return;
+	}
+	cmd->dataout_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+
+	del_timer_sync(&cmd->dataout_timer);
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+	pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
+			cmd->init_task_tag);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+EXPORT_SYMBOL(iscsit_stop_dataout_timer);
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
new file mode 100644
index 0000000..2a3ebf1
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -0,0 +1,26 @@
+#ifndef ISCSI_TARGET_ERL1_H
+#define ISCSI_TARGET_ERL1_H
+
+extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+			struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+			struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
+			itt_t, u32, u32, u32);
+extern int iscsit_handle_status_snack(struct iscsi_conn *, itt_t, u32,
+			u32, u32);
+extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
+extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
+extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
+extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
+extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
+extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
+extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
+extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+
+#endif /* ISCSI_TARGET_ERL1_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
new file mode 100644
index 0000000..e24f1c7
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -0,0 +1,436 @@
+/*******************************************************************************
+ * This file contains error recovery level two functions used by
+ * the iSCSI Target driver.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+/*
+ *	FIXME: Does RData SNACK apply here as well?
+ */
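+/*
+ *	Rebuild read_data_done and next_burst_len from the initiator's
+ *	ExpDataSN.  For example (illustrative values): with
+ *	MaxRecvDataSegmentLength 8192, MaxBurstLength 65536 and an
+ *	exp_data_sn of 3, three full-sized DataIN PDUs are treated as
+ *	acknowledged, leaving read_data_done = 24576.
+ */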
+void iscsit_create_conn_recovery_datain_values(
+	struct iscsi_cmd *cmd,
+	__be32 exp_data_sn)
+{
+	u32 data_sn = 0;
+	struct iscsi_conn *conn = cmd->conn;
+
+	cmd->next_burst_len = 0;
+	cmd->read_data_done = 0;
+
+	while (be32_to_cpu(exp_data_sn) > data_sn) {
+		if ((cmd->next_burst_len +
+		     conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength) {
+			cmd->read_data_done +=
+			       conn->conn_ops->MaxRecvDataSegmentLength;
+			cmd->next_burst_len +=
+			       conn->conn_ops->MaxRecvDataSegmentLength;
+		} else {
+			cmd->read_data_done +=
+				(conn->sess->sess_ops->MaxBurstLength -
+				cmd->next_burst_len);
+			cmd->next_burst_len = 0;
+		}
+		data_sn++;
+	}
+}
+
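+/*
+ *	Round write_data_done down to a MaxBurstLength boundary so DataOUT
+ *	recovery restarts on a burst boundary.  For example (illustrative
+ *	values): write_data_done 100000 with MaxBurstLength 65536 is rounded
+ *	down to 65536.
+ */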
+void iscsit_create_conn_recovery_dataout_values(
+	struct iscsi_cmd *cmd)
+{
+	u32 write_data_done = 0;
+	struct iscsi_conn *conn = cmd->conn;
+
+	cmd->data_sn = 0;
+	cmd->next_burst_len = 0;
+
+	while (cmd->write_data_done > write_data_done) {
+		if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
+		     cmd->write_data_done)
+			write_data_done += conn->sess->sess_ops->MaxBurstLength;
+		else
+			break;
+	}
+
+	cmd->write_data_done = write_data_done;
+}
+
+static int iscsit_attach_active_connection_recovery_entry(
+	struct iscsi_session *sess,
+	struct iscsi_conn_recovery *cr)
+{
+	spin_lock(&sess->cr_a_lock);
+	list_add_tail(&cr->cr_list, &sess->cr_active_list);
+	spin_unlock(&sess->cr_a_lock);
+
+	return 0;
+}
+
+static int iscsit_attach_inactive_connection_recovery_entry(
+	struct iscsi_session *sess,
+	struct iscsi_conn_recovery *cr)
+{
+	spin_lock(&sess->cr_i_lock);
+	list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
+
+	sess->conn_recovery_count++;
+	pr_debug("Incremented connection recovery count to %u for"
+		" SID: %u\n", sess->conn_recovery_count, sess->sid);
+	spin_unlock(&sess->cr_i_lock);
+
+	return 0;
+}
+
+struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+	struct iscsi_session *sess,
+	u16 cid)
+{
+	struct iscsi_conn_recovery *cr;
+
+	spin_lock(&sess->cr_i_lock);
+	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+		if (cr->cid == cid) {
+			spin_unlock(&sess->cr_i_lock);
+			return cr;
+		}
+	}
+	spin_unlock(&sess->cr_i_lock);
+
+	return NULL;
+}
+
+void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+{
+	struct iscsi_cmd *cmd, *cmd_tmp;
+	struct iscsi_conn_recovery *cr, *cr_tmp;
+
+	spin_lock(&sess->cr_a_lock);
+	list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
+		list_del(&cr->cr_list);
+		spin_unlock(&sess->cr_a_lock);
+
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry_safe(cmd, cmd_tmp,
+				&cr->conn_recovery_cmd_list, i_conn_node) {
+
+			list_del_init(&cmd->i_conn_node);
+			cmd->conn = NULL;
+			spin_unlock(&cr->conn_recovery_cmd_lock);
+			iscsit_free_cmd(cmd, true);
+			spin_lock(&cr->conn_recovery_cmd_lock);
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+		spin_lock(&sess->cr_a_lock);
+
+		kfree(cr);
+	}
+	spin_unlock(&sess->cr_a_lock);
+
+	spin_lock(&sess->cr_i_lock);
+	list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
+		list_del(&cr->cr_list);
+		spin_unlock(&sess->cr_i_lock);
+
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry_safe(cmd, cmd_tmp,
+				&cr->conn_recovery_cmd_list, i_conn_node) {
+
+			list_del_init(&cmd->i_conn_node);
+			cmd->conn = NULL;
+			spin_unlock(&cr->conn_recovery_cmd_lock);
+			iscsit_free_cmd(cmd, true);
+			spin_lock(&cr->conn_recovery_cmd_lock);
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+		spin_lock(&sess->cr_i_lock);
+
+		kfree(cr);
+	}
+	spin_unlock(&sess->cr_i_lock);
+}
+
+int iscsit_remove_active_connection_recovery_entry(
+	struct iscsi_conn_recovery *cr,
+	struct iscsi_session *sess)
+{
+	spin_lock(&sess->cr_a_lock);
+	list_del(&cr->cr_list);
+
+	sess->conn_recovery_count--;
+	pr_debug("Decremented connection recovery count to %u for"
+		" SID: %u\n", sess->conn_recovery_count, sess->sid);
+	spin_unlock(&sess->cr_a_lock);
+
+	kfree(cr);
+
+	return 0;
+}
+
+static void iscsit_remove_inactive_connection_recovery_entry(
+	struct iscsi_conn_recovery *cr,
+	struct iscsi_session *sess)
+{
+	spin_lock(&sess->cr_i_lock);
+	list_del(&cr->cr_list);
+	spin_unlock(&sess->cr_i_lock);
+}
+
+/*
+ *	Called with cr->conn_recovery_cmd_lock held.
+ */
+int iscsit_remove_cmd_from_connection_recovery(
+	struct iscsi_cmd *cmd,
+	struct iscsi_session *sess)
+{
+	struct iscsi_conn_recovery *cr;
+
+	if (!cmd->cr) {
+		pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+			" is NULL!\n", cmd->init_task_tag);
+		BUG();
+	}
+	cr = cmd->cr;
+
+	list_del_init(&cmd->i_conn_node);
+	return --cr->cmd_count;
+}
+
+void iscsit_discard_cr_cmds_by_expstatsn(
+	struct iscsi_conn_recovery *cr,
+	u32 exp_statsn)
+{
+	u32 dropped_count = 0;
+	struct iscsi_cmd *cmd, *cmd_tmp;
+	struct iscsi_session *sess = cr->sess;
+
+	spin_lock(&cr->conn_recovery_cmd_lock);
+	list_for_each_entry_safe(cmd, cmd_tmp,
+			&cr->conn_recovery_cmd_list, i_conn_node) {
+
+		if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
+		     (cmd->deferred_i_state != ISTATE_REMOVE)) ||
+		     (cmd->stat_sn >= exp_statsn)) {
+			continue;
+		}
+
+		dropped_count++;
+		pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
+			" 0x%08x, CID: %hu.\n", cmd->init_task_tag,
+				cmd->stat_sn, cr->cid);
+
+		iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+		iscsit_free_cmd(cmd, true);
+		spin_lock(&cr->conn_recovery_cmd_lock);
+	}
+	spin_unlock(&cr->conn_recovery_cmd_lock);
+
+	pr_debug("Dropped %u total acknowledged commands on"
+		" CID: %hu less than old ExpStatSN: 0x%08x\n",
+			dropped_count, cr->cid, exp_statsn);
+
+	if (!cr->cmd_count) {
+		pr_debug("No commands to be reassigned for failed"
+			" connection CID: %hu on SID: %u\n",
+			cr->cid, sess->sid);
+		iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+		iscsit_attach_active_connection_recovery_entry(sess, cr);
+		pr_debug("iSCSI connection recovery successful for CID:"
+			" %hu on SID: %u\n", cr->cid, sess->sid);
+		iscsit_remove_active_connection_recovery_entry(cr, sess);
+	} else {
+		iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+		iscsit_attach_active_connection_recovery_entry(sess, cr);
+	}
+}
+
+int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+	u32 dropped_count = 0;
+	struct iscsi_cmd *cmd, *cmd_tmp;
+	struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+	struct iscsi_session *sess = conn->sess;
+
+	mutex_lock(&sess->cmdsn_mutex);
+	list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+			&sess->sess_ooo_cmdsn_list, ooo_list) {
+
+		if (ooo_cmdsn->cid != conn->cid)
+			continue;
+
+		dropped_count++;
+		pr_debug("Dropping unacknowledged CmdSN:"
+		" 0x%08x during connection recovery on CID: %hu\n",
+			ooo_cmdsn->cmdsn, conn->cid);
+		iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+	}
+	mutex_unlock(&sess->cmdsn_mutex);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+		if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
+			continue;
+
+		list_del_init(&cmd->i_conn_node);
+
+		spin_unlock_bh(&conn->cmd_lock);
+		iscsit_free_cmd(cmd, true);
+		spin_lock_bh(&conn->cmd_lock);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_debug("Dropped %u total unacknowledged commands on CID:"
+		" %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
+				sess->exp_cmd_sn);
+	return 0;
+}
+
+int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+{
+	u32 cmd_count = 0;
+	struct iscsi_cmd *cmd, *cmd_tmp;
+	struct iscsi_conn_recovery *cr;
+
+	/*
+	 * Allocate a struct iscsi_conn_recovery for this connection.
+	 * Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer
+	 * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
+	 * connection's command list for connection recovery.
+	 */
+	cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
+	if (!cr) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_conn_recovery.\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&cr->cr_list);
+	INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
+	spin_lock_init(&cr->conn_recovery_cmd_lock);
+	/*
+	 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
+	 * ISCSI_OP_NOOP_OUT opcodes.  For all other opcodes call
+	 * list_del_init(&cmd->i_conn_node); to release the command to the
+	 * session pool and remove it from the connection's list.
+	 *
+	 * Also stop the DataOUT timer, which will be restarted after
+	 * sending the TMR response.
+	 */
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+
+		if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
+		    (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
+			pr_debug("Not performing realligence on"
+				" Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
+				" CID: %hu\n", cmd->iscsi_opcode,
+				cmd->init_task_tag, cmd->cmd_sn, conn->cid);
+
+			list_del_init(&cmd->i_conn_node);
+			spin_unlock_bh(&conn->cmd_lock);
+			iscsit_free_cmd(cmd, true);
+			spin_lock_bh(&conn->cmd_lock);
+			continue;
+		}
+
+		/*
+		 * Special case where commands greater than or equal to
+		 * the session's ExpCmdSN are attached to the connection
+		 * list but not to the out of order CmdSN list.  The one
+		 * obvious case is when a command with immediate data
+		 * attached must only check the CmdSN against ExpCmdSN
+		 * after the data is received.  The special case below
+		 * is when the connection fails before data is received,
+		 * but also may apply to other PDUs, so it has been
+		 * made generic here.
+		 */
+		if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
+		     iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
+			list_del_init(&cmd->i_conn_node);
+			spin_unlock_bh(&conn->cmd_lock);
+			iscsit_free_cmd(cmd, true);
+			spin_lock_bh(&conn->cmd_lock);
+			continue;
+		}
+
+		cmd_count++;
+		pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
+			" CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
+			" realligence.\n", cmd->iscsi_opcode,
+			cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
+			conn->cid);
+
+		cmd->deferred_i_state = cmd->i_state;
+		cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
+
+		if (cmd->data_direction == DMA_TO_DEVICE)
+			iscsit_stop_dataout_timer(cmd);
+
+		cmd->sess = conn->sess;
+
+		list_del_init(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		iscsit_free_all_datain_reqs(cmd);
+
+		transport_wait_for_tasks(&cmd->se_cmd);
+		/*
+		 * Add the struct iscsi_cmd to the connection recovery cmd list
+		 */
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+
+		spin_lock_bh(&conn->cmd_lock);
+		cmd->cr = cr;
+		cmd->conn = NULL;
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+	/*
+	 * Fill in the various values in the preallocated struct iscsi_conn_recovery.
+	 */
+	cr->cid = conn->cid;
+	cr->cmd_count = cmd_count;
+	cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
+	cr->maxxmitdatasegmentlength = conn->conn_ops->MaxXmitDataSegmentLength;
+	cr->sess = conn->sess;
+
+	iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
+
+	return 0;
+}
+
+int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
+{
+	atomic_set(&conn->connection_recovery, 1);
+
+	if (iscsit_close_connection(conn) < 0)
+		return -1;
+
+	return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
new file mode 100644
index 0000000..63f2501
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -0,0 +1,18 @@
+#ifndef ISCSI_TARGET_ERL2_H
+#define ISCSI_TARGET_ERL2_H
+
+extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
+extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
+extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+			struct iscsi_session *, u16);
+extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern int iscsit_remove_active_connection_recovery_entry(
+			struct iscsi_conn_recovery *, struct iscsi_session *);
+extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
+			struct iscsi_session *);
+extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
+extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
+extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_ERL2_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
new file mode 100644
index 0000000..bc2cbff
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -0,0 +1,1447 @@
+/*******************************************************************************
+ * This file contains the login functions used by the iSCSI Target driver.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <linux/idr.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
+#include "iscsi_target_device.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+#include <target/iscsi/iscsi_transport.h>
+
+static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
+{
+	struct iscsi_login *login;
+
+	login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
+	if (!login) {
+		pr_err("Unable to allocate memory for struct iscsi_login.\n");
+		return NULL;
+	}
+	conn->login = login;
+	login->conn = conn;
+	login->first_request = 1;
+
+	login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+	if (!login->req_buf) {
+		pr_err("Unable to allocate memory for request buffer.\n");
+		goto out_login;
+	}
+
+	login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+	if (!login->rsp_buf) {
+		pr_err("Unable to allocate memory for response buffer.\n");
+		goto out_req_buf;
+	}
+
+	conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+	if (!conn->conn_ops) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_conn_ops.\n");
+		goto out_rsp_buf;
+	}
+
+	init_waitqueue_head(&conn->queues_wq);
+	INIT_LIST_HEAD(&conn->conn_list);
+	INIT_LIST_HEAD(&conn->conn_cmd_list);
+	INIT_LIST_HEAD(&conn->immed_queue_list);
+	INIT_LIST_HEAD(&conn->response_queue_list);
+	init_completion(&conn->conn_post_wait_comp);
+	init_completion(&conn->conn_wait_comp);
+	init_completion(&conn->conn_wait_rcfr_comp);
+	init_completion(&conn->conn_waiting_on_uc_comp);
+	init_completion(&conn->conn_logout_comp);
+	init_completion(&conn->rx_half_close_comp);
+	init_completion(&conn->tx_half_close_comp);
+	init_completion(&conn->rx_login_comp);
+	spin_lock_init(&conn->cmd_lock);
+	spin_lock_init(&conn->conn_usage_lock);
+	spin_lock_init(&conn->immed_queue_lock);
+	spin_lock_init(&conn->nopin_timer_lock);
+	spin_lock_init(&conn->response_queue_lock);
+	spin_lock_init(&conn->state_lock);
+
+	if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+		pr_err("Unable to allocate conn->conn_cpumask\n");
+		goto out_conn_ops;
+	}
+	conn->conn_login = login;
+
+	return login;
+
+out_conn_ops:
+	kfree(conn->conn_ops);
+out_rsp_buf:
+	kfree(login->rsp_buf);
+out_req_buf:
+	kfree(login->req_buf);
+out_login:
+	kfree(login);
+	return NULL;
+}
+
+/*
+ * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
+ * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
+ */
+int iscsi_login_setup_crypto(struct iscsi_conn *conn)
+{
+	/*
+	 * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
+	 * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
+	 * to software 1x8 byte slicing from crc32c.ko
+	 */
+	conn->conn_rx_hash.flags = 0;
+	conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+						CRYPTO_ALG_ASYNC);
+	if (IS_ERR(conn->conn_rx_hash.tfm)) {
+		pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+		return -ENOMEM;
+	}
+
+	conn->conn_tx_hash.flags = 0;
+	conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+						CRYPTO_ALG_ASYNC);
+	if (IS_ERR(conn->conn_tx_hash.tfm)) {
+		pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
+		crypto_free_hash(conn->conn_rx_hash.tfm);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int iscsi_login_check_initiator_version(
+	struct iscsi_conn *conn,
+	u8 version_max,
+	u8 version_min)
+{
+	if ((version_max != 0x00) || (version_min != 0x00)) {
+		pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
+			" version Min/Max 0x%02x/0x%02x, rejecting login.\n",
+			version_min, version_max);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_NO_VERSION);
+		return -1;
+	}
+
+	return 0;
+}
+
+int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+{
+	int sessiontype;
+	struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
+	struct iscsi_portal_group *tpg = conn->tpg;
+	struct iscsi_session *sess = NULL, *sess_p = NULL;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+	struct se_session *se_sess, *se_sess_tmp;
+
+	initiatorname_param = iscsi_find_param_from_key(
+			INITIATORNAME, conn->param_list);
+	sessiontype_param = iscsi_find_param_from_key(
+			SESSIONTYPE, conn->param_list);
+	if (!initiatorname_param || !sessiontype_param) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+		return -1;
+	}
+
+	sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+			sess_list) {
+
+		sess_p = se_sess->fabric_sess_ptr;
+		spin_lock(&sess_p->conn_lock);
+		if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+		    atomic_read(&sess_p->session_logout) ||
+		    (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+			spin_unlock(&sess_p->conn_lock);
+			continue;
+		}
+		if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
+		   (!strcmp(sess_p->sess_ops->InitiatorName,
+			    initiatorname_param->value) &&
+		   (sess_p->sess_ops->SessionType == sessiontype))) {
+			atomic_set(&sess_p->session_reinstatement, 1);
+			atomic_set(&sess_p->session_fall_back_to_erl0, 1);
+			spin_unlock(&sess_p->conn_lock);
+			iscsit_inc_session_usage_count(sess_p);
+			iscsit_stop_time2retain_timer(sess_p);
+			sess = sess_p;
+			break;
+		}
+		spin_unlock(&sess_p->conn_lock);
+	}
+	spin_unlock_bh(&se_tpg->session_lock);
+	/*
+	 * If the Time2Retain handler has expired, the session is already gone.
+	 */
+	if (!sess)
+		return 0;
+
+	pr_debug("%s iSCSI Session SID %u is still active for %s,"
+		" performing session reinstatement.\n", (sessiontype) ?
+		"Discovery" : "Normal", sess->sid,
+		sess->sess_ops->InitiatorName);
+
+	spin_lock_bh(&sess->conn_lock);
+	if (sess->session_state == TARG_SESS_STATE_FAILED) {
+		spin_unlock_bh(&sess->conn_lock);
+		iscsit_dec_session_usage_count(sess);
+		target_put_session(sess->se_sess);
+		return 0;
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	iscsit_stop_session(sess, 1, 1);
+	iscsit_dec_session_usage_count(sess);
+
+	target_put_session(sess->se_sess);
+	return 0;
+}
+
+static void iscsi_login_set_conn_values(
+	struct iscsi_session *sess,
+	struct iscsi_conn *conn,
+	__be16 cid)
+{
+	conn->sess		= sess;
+	conn->cid		= be16_to_cpu(cid);
+	/*
+	 * Generate a random Status sequence number (statsn) for the new
+	 * iSCSI connection.
+	 */
+	get_random_bytes(&conn->stat_sn, sizeof(u32));
+
+	mutex_lock(&auth_id_lock);
+	conn->auth_id		= iscsit_global->auth_id++;
+	mutex_unlock(&auth_id_lock);
+}
+
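+/*
+ *	Format a single "Key=Value" pair and apply it to the connection's
+ *	parameter list, e.g. iscsi_change_param_sprintf(conn,
+ *	"TargetPortalGroupTag=%hu", tpgt).  On failure a login response
+ *	with a NO_RESOURCES status class is queued and -1 is returned.
+ */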
+static __printf(2, 3) int iscsi_change_param_sprintf(
+	struct iscsi_conn *conn,
+	const char *fmt, ...)
+{
+	va_list args;
+	unsigned char buf[64];
+
+	memset(buf, 0, sizeof buf);
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof buf, fmt, args);
+	va_end(args);
+
+	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ *	This is the leading connection of a new session,
+ *	or session reinstatement.
+ */
+static int iscsi_login_zero_tsih_s1(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_session *sess = NULL;
+	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+	int ret;
+
+	sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
+	if (!sess) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		pr_err("Could not allocate memory for session\n");
+		return -ENOMEM;
+	}
+
+	iscsi_login_set_conn_values(sess, conn, pdu->cid);
+	sess->init_task_tag	= pdu->itt;
+	memcpy(&sess->isid, pdu->isid, 6);
+	sess->exp_cmd_sn	= be32_to_cpu(pdu->cmdsn);
+	INIT_LIST_HEAD(&sess->sess_conn_list);
+	INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
+	INIT_LIST_HEAD(&sess->cr_active_list);
+	INIT_LIST_HEAD(&sess->cr_inactive_list);
+	init_completion(&sess->async_msg_comp);
+	init_completion(&sess->reinstatement_comp);
+	init_completion(&sess->session_wait_comp);
+	init_completion(&sess->session_waiting_on_uc_comp);
+	mutex_init(&sess->cmdsn_mutex);
+	spin_lock_init(&sess->conn_lock);
+	spin_lock_init(&sess->cr_a_lock);
+	spin_lock_init(&sess->cr_i_lock);
+	spin_lock_init(&sess->session_usage_lock);
+	spin_lock_init(&sess->ttt_lock);
+
+	idr_preload(GFP_KERNEL);
+	spin_lock_bh(&sess_idr_lock);
+	ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
+	if (ret >= 0)
+		sess->session_index = ret;
+	spin_unlock_bh(&sess_idr_lock);
+	idr_preload_end();
+
+	if (ret < 0) {
+		pr_err("idr_alloc() for sess_idr failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		kfree(sess);
+		return -ENOMEM;
+	}
+
+	sess->creation_time = get_jiffies_64();
+	/*
+	 * The FFP CmdSN window values will be allocated from the TPG's
+	 * Initiator Node's ACL once the login has been successfully completed.
+	 */
+	atomic_set(&sess->max_cmd_sn, be32_to_cpu(pdu->cmdsn));
+
+	sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
+	if (!sess->sess_ops) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_sess_ops.\n");
+		kfree(sess);
+		return -ENOMEM;
+	}
+
+	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+	if (IS_ERR(sess->se_sess)) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		kfree(sess->sess_ops);
+		kfree(sess);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int iscsi_login_zero_tsih_s2(
+	struct iscsi_conn *conn)
+{
+	struct iscsi_node_attrib *na;
+	struct iscsi_session *sess = conn->sess;
+	bool iser = false;
+
+	sess->tpg = conn->tpg;
+
+	/*
+	 * Assign a new TPG Session Handle.  Note this is protected with
+	 * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
+	 */
+	sess->tsih = ++sess->tpg->ntsih;
+	if (!sess->tsih)
+		sess->tsih = ++sess->tpg->ntsih;
+
+	/*
+	 * Create the default params from user-defined values.
+	 */
+	if (iscsi_copy_param_list(&conn->param_list,
+				conn->tpg->param_list, 1) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+		iser = true;
+
+	iscsi_set_keys_to_negotiate(conn->param_list, iser);
+
+	if (sess->sess_ops->SessionType)
+		return iscsi_set_keys_irrelevant_for_discovery(
+				conn->param_list);
+
+	na = iscsit_tpg_get_node_attrib(sess);
+
+	/*
+	 * Need to send TargetPortalGroupTag back in first login response
+	 * on any iSCSI connection where the Initiator provides TargetName.
+	 * See 5.3.1.  Login Phase Start
+	 *
+	 * In our case, we have already located the struct iscsi_tiqn at this point.
+	 */
+	if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
+		return -1;
+
+	/*
+	 * Workaround for Initiators that have broken connection recovery logic.
+	 *
+	 * "We would really like to get rid of this." Linux-iSCSI.org team
+	 */
+	if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
+		return -1;
+
+	/*
+	 * Set RDMAExtensions=Yes by default for iSER enabled network portals
+	 */
+	if (iser) {
+		struct iscsi_param *param;
+		unsigned long mrdsl, off;
+		int rc;
+
+		if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
+			return -1;
+
+		/*
+		 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
+	 * Immediate Data + Unsolicited Data-OUT if necessary.
+		 */
+		param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
+						  conn->param_list);
+		if (!param) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
+		rc = kstrtoul(param->value, 0, &mrdsl);
+		if (rc < 0) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
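+		/*
+		 * For example (illustrative values): with PAGE_SIZE 4096,
+		 * a proposed MRDSL of 70000 gives off = 70000 % 4096 = 368
+		 * and is aligned down to 69632, while values below
+		 * PAGE_SIZE are rounded up to PAGE_SIZE.
+		 */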
+		off = mrdsl % PAGE_SIZE;
+		if (!off)
+			goto check_prot;
+
+		if (mrdsl < PAGE_SIZE)
+			mrdsl = PAGE_SIZE;
+		else
+			mrdsl -= off;
+
+		pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
+			" to PAGE_SIZE\n", mrdsl);
+
+		if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu", mrdsl))
+			return -1;
+		/*
+		 * ISER currently requires that ImmediateData + Unsolicited
+		 * Data be disabled when protection / signature MRs are enabled.
+		 */
+check_prot:
+		if (sess->se_sess->sup_prot_ops &
+		   (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+		    TARGET_PROT_DOUT_INSERT)) {
+
+			if (iscsi_change_param_sprintf(conn, "ImmediateData=No"))
+				return -1;
+
+			if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes"))
+				return -1;
+
+			pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+				 " T10-PI enabled ISER session\n");
+		}
+	}
+
+	return 0;
+}
+
+static int iscsi_login_non_zero_tsih_s1(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+	iscsi_login_set_conn_values(NULL, conn, pdu->cid);
+	return 0;
+}
+
+/*
+ *	Add a new connection to an existing session.
+ */
+static int iscsi_login_non_zero_tsih_s2(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_portal_group *tpg = conn->tpg;
+	struct iscsi_session *sess = NULL, *sess_p = NULL;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+	struct se_session *se_sess, *se_sess_tmp;
+	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+	bool iser = false;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+			sess_list) {
+
+		sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+		    atomic_read(&sess_p->session_logout) ||
+		   (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
+			continue;
+		if (!memcmp(sess_p->isid, pdu->isid, 6) &&
+		     (sess_p->tsih == be16_to_cpu(pdu->tsih))) {
+			iscsit_inc_session_usage_count(sess_p);
+			iscsit_stop_time2retain_timer(sess_p);
+			sess = sess_p;
+			break;
+		}
+	}
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	/*
+	 * If the Time2Retain handler has expired, the session is already gone.
+	 */
+	if (!sess) {
+		pr_err("Initiator attempting to add a connection to"
+			" a non-existent session, rejecting iSCSI Login.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_NO_SESSION);
+		return -1;
+	}
+
+	/*
+	 * Stop the Time2Retain timer if this is a failed session, we restart
+	 * the timer if the login is not successful.
+	 */
+	spin_lock_bh(&sess->conn_lock);
+	if (sess->session_state == TARG_SESS_STATE_FAILED)
+		atomic_set(&sess->session_continuation, 1);
+	spin_unlock_bh(&sess->conn_lock);
+
+	iscsi_login_set_conn_values(sess, conn, pdu->cid);
+
+	if (iscsi_copy_param_list(&conn->param_list,
+			conn->tpg->param_list, 0) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+		iser = true;
+
+	iscsi_set_keys_to_negotiate(conn->param_list, iser);
+	/*
+	 * Need to send TargetPortalGroupTag back in first login response
+	 * on any iSCSI connection where the Initiator provides TargetName.
+	 * See 5.3.1.  Login Phase Start
+	 *
+	 * In our case, we have already located the struct iscsi_tiqn at this point.
+	 */
+	if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
+		return -1;
+
+	return 0;
+}
+
+int iscsi_login_post_auth_non_zero_tsih(
+	struct iscsi_conn *conn,
+	u16 cid,
+	u32 exp_statsn)
+{
+	struct iscsi_conn *conn_ptr = NULL;
+	struct iscsi_conn_recovery *cr = NULL;
+	struct iscsi_session *sess = conn->sess;
+
+	/*
+	 * By following item 5 in the login table, if we have found
+	 * an existing ISID and a valid/existing TSIH and an existing
+	 * CID we do connection reinstatement.  Currently we do not
+	 * support it, so we send back a non-zero status class to the
+	 * initiator and release the new connection.
+	 */
+	conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
+	if (conn_ptr) {
+		pr_err("Connection exists with CID %hu for %s,"
+			" performing connection reinstatement.\n",
+			conn_ptr->cid, sess->sess_ops->InitiatorName);
+
+		iscsit_connection_reinstatement_rcfr(conn_ptr);
+		iscsit_dec_conn_usage_count(conn_ptr);
+	}
+
+	/*
+	 * Check for any connection recovery entries containing CID.
+	 * We use the original ExpStatSN sent in the first login request
+	 * to acknowledge commands for the failed connection.
+	 *
+	 * Also note that an explicit logout may have already been sent,
+	 * but the response may not be sent due to additional connection
+	 * loss.
+	 */
+	if (sess->sess_ops->ErrorRecoveryLevel == 2) {
+		cr = iscsit_get_inactive_connection_recovery_entry(
+				sess, cid);
+		if (cr) {
+			pr_debug("Performing implicit logout"
+				" for connection recovery on CID: %hu\n",
+					conn->cid);
+			iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
+		}
+	}
+
+	/*
+	 * Else we follow item 4 from the login table in that we have
+	 * found an existing ISID and a valid/existing TSIH and a new
+	 * CID we go ahead and continue to add a new connection to the
+	 * session.
+	 */
+	pr_debug("Adding CID %hu to existing session for %s.\n",
+			cid, sess->sess_ops->InitiatorName);
+
+	if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
+		pr_err("Adding additional connection to this session"
+			" would exceed MaxConnections %d, login failed.\n",
+				sess->sess_ops->MaxConnections);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_ISID_ERROR);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	/*
+	 * FIXME: Unsolicited NopIN support for ISER
+	 */
+	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+		return;
+
+	if (!sess->sess_ops->SessionType)
+		iscsit_start_nopin_timer(conn);
+}
+
+int iscsit_start_kthreads(struct iscsi_conn *conn)
+{
+	int ret = 0;
+
+	spin_lock(&iscsit_global->ts_bitmap_lock);
+	conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+					ISCSIT_BITMAP_BITS, get_order(1));
+	spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+	if (conn->bitmap_id < 0) {
+		pr_err("bitmap_find_free_region() failed for"
+		       " iscsit_start_kthreads()\n");
+		return -ENOMEM;
+	}
+
+	conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
+				      "%s", ISCSI_TX_THREAD_NAME);
+	if (IS_ERR(conn->tx_thread)) {
+		pr_err("Unable to start iscsi_target_tx_thread\n");
+		ret = PTR_ERR(conn->tx_thread);
+		goto out_bitmap;
+	}
+	conn->tx_thread_active = true;
+
+	conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
+				      "%s", ISCSI_RX_THREAD_NAME);
+	if (IS_ERR(conn->rx_thread)) {
+		pr_err("Unable to start iscsi_target_rx_thread\n");
+		ret = PTR_ERR(conn->rx_thread);
+		goto out_tx;
+	}
+	conn->rx_thread_active = true;
+
+	return 0;
+out_tx:
+	send_sig(SIGINT, conn->tx_thread, 1);
+	kthread_stop(conn->tx_thread);
+	conn->tx_thread_active = false;
+out_bitmap:
+	spin_lock(&iscsit_global->ts_bitmap_lock);
+	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+			      get_order(1));
+	spin_unlock(&iscsit_global->ts_bitmap_lock);
+	return ret;
+}
+
+void iscsi_post_login_handler(
+	struct iscsi_np *np,
+	struct iscsi_conn *conn,
+	u8 zero_tsih)
+{
+	int stop_timer = 0;
+	struct iscsi_session *sess = conn->sess;
+	struct se_session *se_sess = sess->se_sess;
+	struct iscsi_portal_group *tpg = sess->tpg;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+	iscsit_inc_conn_usage_count(conn);
+
+	iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
+			ISCSI_LOGIN_STATUS_ACCEPT);
+
+	pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
+	conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
+
+	iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
+	/*
+	 * SCSI Initiator -> SCSI Target Port Mapping
+	 */
+	if (!zero_tsih) {
+		iscsi_set_session_parameters(sess->sess_ops,
+				conn->param_list, 0);
+		iscsi_release_param_list(conn->param_list);
+		conn->param_list = NULL;
+
+		spin_lock_bh(&sess->conn_lock);
+		atomic_set(&sess->session_continuation, 0);
+		if (sess->session_state == TARG_SESS_STATE_FAILED) {
+			pr_debug("Moving to"
+					" TARG_SESS_STATE_LOGGED_IN.\n");
+			sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+			stop_timer = 1;
+		}
+
+		pr_debug("iSCSI Login successful on CID: %hu from %pISpc to"
+			" %pISpc,%hu\n", conn->cid, &conn->login_sockaddr,
+			&conn->local_sockaddr, tpg->tpgt);
+
+		list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+		atomic_inc(&sess->nconn);
+		pr_debug("Incremented iSCSI Connection count to %hu"
+			" from node: %s\n", atomic_read(&sess->nconn),
+			sess->sess_ops->InitiatorName);
+		spin_unlock_bh(&sess->conn_lock);
+
+		iscsi_post_login_start_timers(conn);
+		/*
+		 * Determine CPU mask to ensure connection's RX and TX kthreads
+		 * are scheduled on the same CPU.
+		 */
+		iscsit_thread_get_cpumask(conn);
+		conn->conn_rx_reset_cpumask = 1;
+		conn->conn_tx_reset_cpumask = 1;
+		/*
+		 * Wakeup the sleeping iscsi_target_rx_thread() now that
+		 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+		 */
+		complete(&conn->rx_login_comp);
+		iscsit_dec_conn_usage_count(conn);
+
+		if (stop_timer) {
+			spin_lock_bh(&se_tpg->session_lock);
+			iscsit_stop_time2retain_timer(sess);
+			spin_unlock_bh(&se_tpg->session_lock);
+		}
+		iscsit_dec_session_usage_count(sess);
+		return;
+	}
+
+	iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
+	iscsi_release_param_list(conn->param_list);
+	conn->param_list = NULL;
+
+	iscsit_determine_maxcmdsn(sess);
+
+	spin_lock_bh(&se_tpg->session_lock);
+	__transport_register_session(&sess->tpg->tpg_se_tpg,
+			se_sess->se_node_acl, se_sess, sess);
+	pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
+	sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+
+	pr_debug("iSCSI Login successful on CID: %hu from %pISpc to %pISpc,%hu\n",
+		conn->cid, &conn->login_sockaddr, &conn->local_sockaddr,
+		tpg->tpgt);
+
+	spin_lock_bh(&sess->conn_lock);
+	list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+	atomic_inc(&sess->nconn);
+	pr_debug("Incremented iSCSI Connection count to %hu from node:"
+		" %s\n", atomic_read(&sess->nconn),
+		sess->sess_ops->InitiatorName);
+	spin_unlock_bh(&sess->conn_lock);
+
+	sess->sid = tpg->sid++;
+	if (!sess->sid)
+		sess->sid = tpg->sid++;
+	pr_debug("Established iSCSI session from node: %s\n",
+			sess->sess_ops->InitiatorName);
+
+	tpg->nsessions++;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_nsessions++;
+
+	pr_debug("Incremented number of active iSCSI sessions to %u on"
+		" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	iscsi_post_login_start_timers(conn);
+	/*
+	 * Determine CPU mask to ensure connection's RX and TX kthreads
+	 * are scheduled on the same CPU.
+	 */
+	iscsit_thread_get_cpumask(conn);
+	conn->conn_rx_reset_cpumask = 1;
+	conn->conn_tx_reset_cpumask = 1;
+	/*
+	 * Wakeup the sleeping iscsi_target_rx_thread() now that
+	 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+	 */
+	complete(&conn->rx_login_comp);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+static void iscsi_handle_login_thread_timeout(unsigned long data)
+{
+	struct iscsi_np *np = (struct iscsi_np *) data;
+
+	spin_lock_bh(&np->np_thread_lock);
+	pr_err("iSCSI Login timeout on Network Portal %pISpc\n",
+			&np->np_sockaddr);
+
+	if (np->np_login_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return;
+	}
+
+	if (np->np_thread)
+		send_sig(SIGINT, np->np_thread, 1);
+
+	np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_start_login_thread_timer(struct iscsi_np *np)
+{
+	/*
+	 * This uses the TA_LOGIN_TIMEOUT constant because at this
+	 * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout
+	 */
+	spin_lock_bh(&np->np_thread_lock);
+	init_timer(&np->np_login_timer);
+	np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
+	np->np_login_timer.data = (unsigned long)np;
+	np->np_login_timer.function = iscsi_handle_login_thread_timeout;
+	np->np_login_timer_flags &= ~ISCSI_TF_STOP;
+	np->np_login_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&np->np_login_timer);
+
+	pr_debug("Added timeout timer to iSCSI login request for"
+			" %u seconds.\n", TA_LOGIN_TIMEOUT);
+	spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
+{
+	spin_lock_bh(&np->np_thread_lock);
+	if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return;
+	}
+	np->np_login_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock_bh(&np->np_thread_lock);
+
+	del_timer_sync(&np->np_login_timer);
+
+	spin_lock_bh(&np->np_thread_lock);
+	np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&np->np_thread_lock);
+}
+
+int iscsit_setup_np(
+	struct iscsi_np *np,
+	struct sockaddr_storage *sockaddr)
+{
+	struct socket *sock = NULL;
+	int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
+
+	switch (np->np_network_transport) {
+	case ISCSI_TCP:
+		np->np_ip_proto = IPPROTO_TCP;
+		np->np_sock_type = SOCK_STREAM;
+		break;
+	case ISCSI_SCTP_TCP:
+		np->np_ip_proto = IPPROTO_SCTP;
+		np->np_sock_type = SOCK_STREAM;
+		break;
+	case ISCSI_SCTP_UDP:
+		np->np_ip_proto = IPPROTO_SCTP;
+		np->np_sock_type = SOCK_SEQPACKET;
+		break;
+	default:
+		pr_err("Unsupported network_transport: %d\n",
+				np->np_network_transport);
+		return -EINVAL;
+	}
+
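+	/*
+	 * Note: the two assignments below unconditionally override the
+	 * per-transport values chosen in the switch above, so every case
+	 * ends up with TCP stream semantics here.
+	 */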
+	np->np_ip_proto = IPPROTO_TCP;
+	np->np_sock_type = SOCK_STREAM;
+
+	ret = sock_create(sockaddr->ss_family, np->np_sock_type,
+			np->np_ip_proto, &sock);
+	if (ret < 0) {
+		pr_err("sock_create() failed.\n");
+		return ret;
+	}
+	np->np_socket = sock;
+	/*
+	 * Setup the np->np_sockaddr from the passed sockaddr setup
+	 * in iscsi_target_configfs.c code.
+	 */
+	memcpy(&np->np_sockaddr, sockaddr,
+			sizeof(struct sockaddr_storage));
+
+	if (sockaddr->ss_family == AF_INET6)
+		len = sizeof(struct sockaddr_in6);
+	else
+		len = sizeof(struct sockaddr_in);
+	/*
+	 * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY.
+	 */
+	/* FIXME: Someone please explain why this is endian-safe */
+	opt = 1;
+	if (np->np_network_transport == ISCSI_TCP) {
+		ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
+				(char *)&opt, sizeof(opt));
+		if (ret < 0) {
+			pr_err("kernel_setsockopt() for TCP_NODELAY"
+				" failed: %d\n", ret);
+			goto fail;
+		}
+	}
+
+	/* FIXME: Someone please explain why this is endian-safe */
+	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+			(char *)&opt, sizeof(opt));
+	if (ret < 0) {
+		pr_err("kernel_setsockopt() for SO_REUSEADDR"
+			" failed\n");
+		goto fail;
+	}
+
+	ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND,
+			(char *)&opt, sizeof(opt));
+	if (ret < 0) {
+		pr_err("kernel_setsockopt() for IP_FREEBIND"
+			" failed\n");
+		goto fail;
+	}
+
+	ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
+	if (ret < 0) {
+		pr_err("kernel_bind() failed: %d\n", ret);
+		goto fail;
+	}
+
+	ret = kernel_listen(sock, backlog);
+	if (ret != 0) {
+		pr_err("kernel_listen() failed: %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	np->np_socket = NULL;
+	sock_release(sock);
+	return ret;
+}
+
+int iscsi_target_setup_login_socket(
+	struct iscsi_np *np,
+	struct sockaddr_storage *sockaddr)
+{
+	struct iscsit_transport *t;
+	int rc;
+
+	t = iscsit_get_transport(np->np_network_transport);
+	if (!t)
+		return -EINVAL;
+
+	rc = t->iscsit_setup_np(np, sockaddr);
+	if (rc < 0) {
+		iscsit_put_transport(t);
+		return rc;
+	}
+
+	np->np_transport = t;
+	np->enabled = true;
+	return 0;
+}
+
+int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+{
+	struct socket *new_sock, *sock = np->np_socket;
+	struct sockaddr_in sock_in;
+	struct sockaddr_in6 sock_in6;
+	int rc, err;
+
+	rc = kernel_accept(sock, &new_sock, 0);
+	if (rc < 0)
+		return rc;
+
+	conn->sock = new_sock;
+	conn->login_family = np->np_sockaddr.ss_family;
+
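+	/*
+	 * For AF_INET6 listeners, v4-mapped peers (e.g. ::ffff:192.0.2.1)
+	 * are rewritten below into plain AF_INET sockaddrs, so the rest
+	 * of the stack always sees a native IPv4 address.
+	 */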
+	if (np->np_sockaddr.ss_family == AF_INET6) {
+		memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
+
+		rc = conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in6, &err, 1);
+		if (!rc) {
+			if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
+				memcpy(&conn->login_sockaddr, &sock_in6, sizeof(sock_in6));
+			} else {
+				/* Pretend to be an ipv4 socket */
+				sock_in.sin_family = AF_INET;
+				sock_in.sin_port = sock_in6.sin6_port;
+				memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
+				memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
+			}
+		}
+
+		rc = conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in6, &err, 0);
+		if (!rc) {
+			if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
+				memcpy(&conn->local_sockaddr, &sock_in6, sizeof(sock_in6));
+			} else {
+				/* Pretend to be an ipv4 socket */
+				sock_in.sin_family = AF_INET;
+				sock_in.sin_port = sock_in6.sin6_port;
+				memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
+				memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
+			}
+		}
+	} else {
+		memset(&sock_in, 0, sizeof(struct sockaddr_in));
+
+		rc = conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in, &err, 1);
+		if (!rc)
+			memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
+
+		rc = conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in, &err, 0);
+		if (!rc)
+			memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
+	}
+
+	return 0;
+}
+
+int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	struct iscsi_login_req *login_req;
+	u32 padding = 0, payload_length;
+
+	if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
+		return -1;
+
+	login_req = (struct iscsi_login_req *)login->req;
+	payload_length	= ntoh24(login_req->dlength);
+	padding = ((-payload_length) & 3);
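+	/*
+	 * Worked example: a 53-byte data segment gives
+	 * padding = ((-53) & 3) = 3, so the receive further down pulls
+	 * 56 bytes total and keeps the stream aligned to the 4-byte
+	 * PDU word boundary.
+	 */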
+
+	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+		login_req->flags, login_req->itt, login_req->cmdsn,
+		login_req->exp_statsn, login_req->cid, payload_length);
+	/*
+	 * Setup the initial iscsi_login values from the leading
+	 * login request PDU.
+	 */
+	if (login->first_request) {
+		login_req = (struct iscsi_login_req *)login->req;
+		login->leading_connection = (!login_req->tsih) ? 1 : 0;
+		login->current_stage	= ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
+		login->version_min	= login_req->min_version;
+		login->version_max	= login_req->max_version;
+		memcpy(login->isid, login_req->isid, 6);
+		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
+		login->init_task_tag	= login_req->itt;
+		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+		login->cid		= be16_to_cpu(login_req->cid);
+		login->tsih		= be16_to_cpu(login_req->tsih);
+	}
+
+	if (iscsi_target_check_login_request(conn, login) < 0)
+		return -1;
+
+	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+	if (iscsi_login_rx_data(conn, login->req_buf,
+				payload_length + padding) < 0)
+		return -1;
+
+	return 0;
+}
+
+int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+			u32 length)
+{
+	if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int
+iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
+{
+	int rc;
+
+	if (!t->owner) {
+		conn->conn_transport = t;
+		return 0;
+	}
+
+	rc = try_module_get(t->owner);
+	if (!rc) {
+		pr_err("try_module_get() failed for %s\n", t->name);
+		return -EINVAL;
+	}
+
+	conn->conn_transport = t;
+	return 0;
+}
+
+void iscsi_target_login_sess_out(struct iscsi_conn *conn,
+		struct iscsi_np *np, bool zero_tsih, bool new_sess)
+{
+	if (!new_sess)
+		goto old_sess_out;
+
+	pr_err("iSCSI Login negotiation failed.\n");
+	iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				   ISCSI_LOGIN_STATUS_INIT_ERR);
+	if (!zero_tsih || !conn->sess)
+		goto old_sess_out;
+	if (conn->sess->se_sess)
+		transport_free_session(conn->sess->se_sess);
+	if (conn->sess->session_index != 0) {
+		spin_lock_bh(&sess_idr_lock);
+		idr_remove(&sess_idr, conn->sess->session_index);
+		spin_unlock_bh(&sess_idr_lock);
+	}
+	kfree(conn->sess->sess_ops);
+	kfree(conn->sess);
+	conn->sess = NULL;
+
+old_sess_out:
+	iscsi_stop_login_thread_timer(np);
+	/*
+	 * If login negotiation fails check if the Time2Retain timer
+	 * needs to be restarted.
+	 */
+	if (!zero_tsih && conn->sess) {
+		spin_lock_bh(&conn->sess->conn_lock);
+		if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
+			struct se_portal_group *se_tpg =
+					&conn->tpg->tpg_se_tpg;
+
+			atomic_set(&conn->sess->session_continuation, 0);
+			spin_unlock_bh(&conn->sess->conn_lock);
+			spin_lock_bh(&se_tpg->session_lock);
+			iscsit_start_time2retain_handler(conn->sess);
+			spin_unlock_bh(&se_tpg->session_lock);
+		} else
+			spin_unlock_bh(&conn->sess->conn_lock);
+		iscsit_dec_session_usage_count(conn->sess);
+	}
+
+	if (!IS_ERR(conn->conn_rx_hash.tfm))
+		crypto_free_hash(conn->conn_rx_hash.tfm);
+	if (!IS_ERR(conn->conn_tx_hash.tfm))
+		crypto_free_hash(conn->conn_tx_hash.tfm);
+
+	free_cpumask_var(conn->conn_cpumask);
+
+	kfree(conn->conn_ops);
+
+	if (conn->param_list) {
+		iscsi_release_param_list(conn->param_list);
+		conn->param_list = NULL;
+	}
+	iscsi_target_nego_release(conn);
+
+	if (conn->sock) {
+		sock_release(conn->sock);
+		conn->sock = NULL;
+	}
+
+	if (conn->conn_transport->iscsit_wait_conn)
+		conn->conn_transport->iscsit_wait_conn(conn);
+
+	if (conn->conn_transport->iscsit_free_conn)
+		conn->conn_transport->iscsit_free_conn(conn);
+
+	iscsit_put_transport(conn->conn_transport);
+	kfree(conn);
+}
+
+static int __iscsi_target_login_thread(struct iscsi_np *np)
+{
+	u8 *buffer, zero_tsih = 0;
+	int ret = 0, rc;
+	struct iscsi_conn *conn = NULL;
+	struct iscsi_login *login;
+	struct iscsi_portal_group *tpg = NULL;
+	struct iscsi_login_req *pdu;
+	struct iscsi_tpg_np *tpg_np;
+	bool new_sess = false;
+
+	flush_signals(current);
+
+	spin_lock_bh(&np->np_thread_lock);
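+	/*
+	 * atomic_dec_if_positive() only decrements a strictly positive
+	 * counter and returns the decremented value (negative if the
+	 * counter was already zero), so a result >= 0 means a reset
+	 * request is pending and must be acknowledged via
+	 * np_restart_comp before accepting new connections.
+	 */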
+	if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+		spin_unlock_bh(&np->np_thread_lock);
+		complete(&np->np_restart_comp);
+		return 1;
+	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+		spin_unlock_bh(&np->np_thread_lock);
+		goto exit;
+	} else {
+		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+	if (!conn) {
+		pr_err("Could not allocate memory for"
+			" new connection\n");
+		/* Get another socket */
+		return 1;
+	}
+	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+	conn->conn_state = TARG_CONN_STATE_FREE;
+
+	if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
+		kfree(conn);
+		return 1;
+	}
+
+	rc = np->np_transport->iscsit_accept_np(np, conn);
+	if (rc == -ENOSYS) {
+		complete(&np->np_restart_comp);
+		iscsit_put_transport(conn->conn_transport);
+		kfree(conn);
+		conn = NULL;
+		goto exit;
+	} else if (rc < 0) {
+		spin_lock_bh(&np->np_thread_lock);
+		if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+			np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+			spin_unlock_bh(&np->np_thread_lock);
+			complete(&np->np_restart_comp);
+			iscsit_put_transport(conn->conn_transport);
+			kfree(conn);
+			conn = NULL;
+			/* Get another socket */
+			return 1;
+		}
+		spin_unlock_bh(&np->np_thread_lock);
+		iscsit_put_transport(conn->conn_transport);
+		kfree(conn);
+		conn = NULL;
+		goto out;
+	}
+	/*
+	 * Perform the remaining iSCSI connection initialization items..
+	 */
+	login = iscsi_login_init_conn(conn);
+	if (!login)
+		goto new_sess_out;
+
+	iscsi_start_login_thread_timer(np);
+
+	pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+	conn->conn_state = TARG_CONN_STATE_XPT_UP;
+	/*
+	 * This will process the first login request + payload..
+	 */
+	rc = np->np_transport->iscsit_get_login_rx(conn, login);
+	if (rc == 1)
+		return 1;
+	else if (rc < 0)
+		goto new_sess_out;
+
+	buffer = &login->req[0];
+	pdu = (struct iscsi_login_req *)buffer;
+	/*
+	 * Used by iscsit_tx_login_rsp() for Login Response PDUs
+	 * when Status-Class != 0.
+	 */
+	conn->login_itt	= pdu->itt;
+
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+		spin_unlock_bh(&np->np_thread_lock);
+		pr_err("iSCSI Network Portal on %pISpc currently not"
+			" active.\n", &np->np_sockaddr);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		goto new_sess_out;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	conn->network_transport = np->np_network_transport;
+
+	pr_debug("Received iSCSI login request from %pISpc on %s Network"
+		" Portal %pISpc\n", &conn->login_sockaddr, np->np_transport->name,
+		&conn->local_sockaddr);
+
+	pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
+	conn->conn_state	= TARG_CONN_STATE_IN_LOGIN;
+
+	if (iscsi_login_check_initiator_version(conn, pdu->max_version,
+			pdu->min_version) < 0)
+		goto new_sess_out;
+
+	zero_tsih = (pdu->tsih == 0x0000);
+	if (zero_tsih) {
+		/*
+		 * This is the leading connection of a new session.
+		 * We wait until after authentication to check for
+		 * session reinstatement.
+		 */
+		if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
+			goto new_sess_out;
+	} else {
+		/*
+		 * Add a new connection to an existing session.
+		 * We check for a non-existent session in
+		 * iscsi_login_non_zero_tsih_s2() below based
+		 * on ISID/TSIH, but wait until after authentication
+		 * to check for connection reinstatement, etc.
+		 */
+		if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
+			goto new_sess_out;
+	}
+	/*
+	 * SessionType: Discovery
+	 *
+	 * 	Locates Default Portal
+	 *
+	 * SessionType: Normal
+	 *
+	 * 	Locates Target Portal from NP -> Target IQN
+	 */
+	rc = iscsi_target_locate_portal(np, conn, login);
+	if (rc < 0) {
+		tpg = conn->tpg;
+		goto new_sess_out;
+	}
+	login->zero_tsih = zero_tsih;
+
+	if (conn->sess)
+		conn->sess->se_sess->sup_prot_ops =
+			conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+
+	tpg = conn->tpg;
+	if (!tpg) {
+		pr_err("Unable to locate struct iscsi_conn->tpg\n");
+		goto new_sess_out;
+	}
+
+	if (zero_tsih) {
+		if (iscsi_login_zero_tsih_s2(conn) < 0)
+			goto new_sess_out;
+	} else {
+		if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0)
+			goto old_sess_out;
+	}
+
+	ret = iscsi_target_start_negotiation(login, conn);
+	if (ret < 0)
+		goto new_sess_out;
+
+	iscsi_stop_login_thread_timer(np);
+
+	if (ret == 1) {
+		tpg_np = conn->tpg_np;
+
+		iscsi_post_login_handler(np, conn, zero_tsih);
+		iscsit_deaccess_np(np, tpg, tpg_np);
+	}
+
+	tpg = NULL;
+	tpg_np = NULL;
+	/* Get another socket */
+	return 1;
+
+new_sess_out:
+	new_sess = true;
+old_sess_out:
+	tpg_np = conn->tpg_np;
+	iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess);
+	new_sess = false;
+
+	if (tpg) {
+		iscsit_deaccess_np(np, tpg, tpg_np);
+		tpg = NULL;
+		tpg_np = NULL;
+	}
+
+out:
+	return 1;
+
+exit:
+	iscsi_stop_login_thread_timer(np);
+	spin_lock_bh(&np->np_thread_lock);
+	np->np_thread_state = ISCSI_NP_THREAD_EXIT;
+	spin_unlock_bh(&np->np_thread_lock);
+
+	return 0;
+}
+
+int iscsi_target_login_thread(void *arg)
+{
+	struct iscsi_np *np = arg;
+	int ret;
+
+	allow_signal(SIGINT);
+
+	while (1) {
+		ret = __iscsi_target_login_thread(np);
+		/*
+		 * We break and exit here unless another sock_accept() call
+		 * is expected.
+		 */
+		if (ret != 1)
+			break;
+	}
+
+	while (!kthread_should_stop())
+		msleep(100);
+
+	return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
new file mode 100644
index 0000000..b597aa2
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -0,0 +1,21 @@
+#ifndef ISCSI_TARGET_LOGIN_H
+#define ISCSI_TARGET_LOGIN_H
+
+extern int iscsi_login_setup_crypto(struct iscsi_conn *);
+extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
+extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+extern int iscsit_setup_np(struct iscsi_np *,
+				struct sockaddr_storage *);
+extern int iscsi_target_setup_login_socket(struct iscsi_np *,
+				struct sockaddr_storage *);
+extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern int iscsit_start_kthreads(struct iscsi_conn *);
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
+				bool, bool);
+extern int iscsi_target_login_thread(void *);
+
+#endif   /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
new file mode 100644
index 0000000..58c629a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -0,0 +1,1354 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI Parameter negotiation.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/ctype.h>
+#include <linux/kthread.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_auth.h"
+
+#define MAX_LOGIN_PDUS  7
+#define TEXT_LEN	4096
+
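+/*
+ * Login key=value pairs arrive from the initiator as NUL-separated
+ * strings; rewriting the NULs to semicolons turns the payload into a
+ * single C string that strstr()/strlen_semi() below can scan, e.g.
+ * "CHAP_A=5\0CHAP_I=1" becomes "CHAP_A=5;CHAP_I=1".
+ */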
+void convert_null_to_semi(char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (buf[i] == '\0')
+			buf[i] = ';';
+}
+
+static int strlen_semi(char *buf)
+{
+	int i = 0;
+
+	while (buf[i] != '\0') {
+		if (buf[i] == ';')
+			return i;
+		i++;
+	}
+
+	return -1;
+}
+
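+/*
+ * Example: extract_param("CHAP_C=0x1a2b;", "CHAP_C", max, out, &type)
+ * copies "1a2b" into out and sets type = HEX; without a "0x"/"0X"
+ * prefix the value is copied verbatim and type is set to DECIMAL.
+ */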
+int extract_param(
+	const char *in_buf,
+	const char *pattern,
+	unsigned int max_length,
+	char *out_buf,
+	unsigned char *type)
+{
+	char *ptr;
+	int len;
+
+	if (!in_buf || !pattern || !out_buf || !type)
+		return -1;
+
+	ptr = strstr(in_buf, pattern);
+	if (!ptr)
+		return -1;
+
+	ptr = strstr(ptr, "=");
+	if (!ptr)
+		return -1;
+
+	ptr += 1;
+	if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
+		ptr += 2; /* skip 0x */
+		*type = HEX;
+	} else
+		*type = DECIMAL;
+
+	len = strlen_semi(ptr);
+	if (len < 0)
+		return -1;
+
+	if (len >= max_length) {
+		pr_err("Length of input: %d exceeds max_length:"
+			" %u\n", len, max_length);
+		return -1;
+	}
+	memcpy(out_buf, ptr, len);
+	out_buf[len] = '\0';
+
+	return 0;
+}
+
+static u32 iscsi_handle_authentication(
+	struct iscsi_conn *conn,
+	char *in_buf,
+	char *out_buf,
+	int in_length,
+	int *out_length,
+	unsigned char *authtype)
+{
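+	/*
+	 * Return convention, consumed by iscsi_target_do_authentication():
+	 * 0 = more authentication PDU exchanges are required, 1 = security
+	 * negotiation completed, 2 = authentication failed or the requested
+	 * AuthMethod is unsupported; anything else is a target error.
+	 */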
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_auth *auth;
+	struct iscsi_node_acl *iscsi_nacl;
+	struct iscsi_portal_group *iscsi_tpg;
+	struct se_node_acl *se_nacl;
+
+	if (!sess->sess_ops->SessionType) {
+		/*
+		 * For SessionType=Normal
+		 */
+		se_nacl = conn->sess->se_sess->se_node_acl;
+		if (!se_nacl) {
+			pr_err("Unable to locate struct se_node_acl for"
+					" CHAP auth\n");
+			return -1;
+		}
+		iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
+				se_node_acl);
+		if (!iscsi_nacl) {
+			pr_err("Unable to locate struct iscsi_node_acl for"
+					" CHAP auth\n");
+			return -1;
+		}
+
+		if (se_nacl->dynamic_node_acl) {
+			iscsi_tpg = container_of(se_nacl->se_tpg,
+					struct iscsi_portal_group, tpg_se_tpg);
+
+			auth = &iscsi_tpg->tpg_demo_auth;
+		} else {
+			iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
+						  se_node_acl);
+
+			auth = &iscsi_nacl->node_auth;
+		}
+	} else {
+		/*
+		 * For SessionType=Discovery
+		 */
+		auth = &iscsit_global->discovery_acl.node_auth;
+	}
+
+	if (strstr("CHAP", authtype))
+		strcpy(conn->sess->auth_type, "CHAP");
+	else
+		strcpy(conn->sess->auth_type, NONE);
+
+	if (strstr("None", authtype))
+		return 1;
+#ifdef CANSRP
+	else if (strstr("SRP", authtype))
+		return srp_main_loop(conn, auth, in_buf, out_buf,
+				&in_length, out_length);
+#endif
+	else if (strstr("CHAP", authtype))
+		return chap_main_loop(conn, auth, in_buf, out_buf,
+				&in_length, out_length);
+	else if (strstr("SPKM1", authtype))
+		return 2;
+	else if (strstr("SPKM2", authtype))
+		return 2;
+	else if (strstr("KRB5", authtype))
+		return 2;
+	else
+		return 2;
+}
+
+static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
+{
+	kfree(conn->auth_protocol);
+}
+
+int iscsi_target_check_login_request(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	int req_csg, req_nsg;
+	u32 payload_length;
+	struct iscsi_login_req *login_req;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	payload_length = ntoh24(login_req->dlength);
+
+	switch (login_req->opcode & ISCSI_OPCODE_MASK) {
+	case ISCSI_OP_LOGIN:
+		break;
+	default:
+		pr_err("Received unknown opcode 0x%02x.\n",
+				login_req->opcode & ISCSI_OPCODE_MASK);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
+	    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+		pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
+			" and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	req_csg = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
+	req_nsg = ISCSI_LOGIN_NEXT_STAGE(login_req->flags);
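+	/*
+	 * CSG lives in bits 2-3 of the flags byte, NSG in bits 0-1.
+	 * Valid stages are 0 (SecurityNegotiation), 1 (LoginOperational-
+	 * Negotiation) and 3 (FullFeaturePhase), which is why CSG >= 2
+	 * and NSG == 2 are rejected below.
+	 */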
+
+	if (req_csg != login->current_stage) {
+		pr_err("Initiator unexpectedly changed login stage"
+			" from %d to %d, login failed.\n", login->current_stage,
+			req_csg);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if ((req_nsg == 2) || (req_csg >= 2) ||
+	   ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
+	    (req_nsg <= req_csg))) {
+		pr_err("Illegal login_req->flags Combination, CSG: %d,"
+			" NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
+			req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if ((login_req->max_version != login->version_max) ||
+	    (login_req->min_version != login->version_min)) {
+		pr_err("Login request changed Version Max/Min"
+			" unexpectedly to 0x%02x/0x%02x, protocol error\n",
+			login_req->max_version, login_req->min_version);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (memcmp(login_req->isid, login->isid, 6) != 0) {
+		pr_err("Login request changed ISID unexpectedly,"
+				" protocol error.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (login_req->itt != login->init_task_tag) {
+		pr_err("Login request changed ITT unexpectedly to"
+			" 0x%08x, protocol error.\n", login_req->itt);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (payload_length > MAX_KEY_VALUE_PAIRS) {
+		pr_err("Login request payload exceeds default"
+			" MaxRecvDataSegmentLength: %u, protocol error.\n",
+				MAX_KEY_VALUE_PAIRS);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iscsi_target_check_first_request(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	struct iscsi_param *param = NULL;
+	struct se_node_acl *se_nacl;
+
+	login->first_request = 0;
+
+	list_for_each_entry(param, &conn->param_list->param_list, p_list) {
+		if (!strncmp(param->name, SESSIONTYPE, 11)) {
+			if (!IS_PSTATE_ACCEPTOR(param)) {
+				pr_err("SessionType key not received"
+					" in first login request.\n");
+				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+					ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+				return -1;
+			}
+			if (!strncmp(param->value, DISCOVERY, 9))
+				return 0;
+		}
+
+		if (!strncmp(param->name, INITIATORNAME, 13)) {
+			if (!IS_PSTATE_ACCEPTOR(param)) {
+				if (!login->leading_connection)
+					continue;
+
+				pr_err("InitiatorName key not received"
+					" in first login request.\n");
+				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+					ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+				return -1;
+			}
+
+			/*
+			 * For non-leading connections, double check that the
+			 * received InitiatorName matches the existing session's
+			 * struct iscsi_node_acl.
+			 */
+			if (!login->leading_connection) {
+				se_nacl = conn->sess->se_sess->se_node_acl;
+				if (!se_nacl) {
+					pr_err("Unable to locate"
+						" struct se_node_acl\n");
+					iscsit_tx_login_rsp(conn,
+							ISCSI_STATUS_CLS_INITIATOR_ERR,
+							ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+					return -1;
+				}
+
+				if (strcmp(param->value,
+						se_nacl->initiatorname)) {
+					pr_err("Incorrect"
+						" InitiatorName: %s for this"
+						" iSCSI Initiator Node.\n",
+						param->value);
+					iscsit_tx_login_rsp(conn,
+							ISCSI_STATUS_CLS_INITIATOR_ERR,
+							ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+					return -1;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	u32 padding = 0;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
+	login_rsp->opcode		= ISCSI_OP_LOGIN_RSP;
+	hton24(login_rsp->dlength, login->rsp_length);
+	memcpy(login_rsp->isid, login->isid, 6);
+	login_rsp->tsih			= cpu_to_be16(login->tsih);
+	login_rsp->itt			= login->init_task_tag;
+	login_rsp->statsn		= cpu_to_be32(conn->stat_sn++);
+	login_rsp->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	login_rsp->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
+
+	pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
+		" ExpCmdSN: 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
+		" %u\n", login_rsp->flags, (__force u32)login_rsp->itt,
+		ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
+		ntohl(login_rsp->statsn), login->rsp_length);
+
+	padding = ((-login->rsp_length) & 3);
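+	/*
+	 * Same 4-byte alignment rule as on the RX side: e.g. a 10-byte
+	 * response is padded out to 12 bytes before transmission.
+	 */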
+	/*
+	 * Before sending the last login response containing the transition
+	 * bit for full-feature-phase, go ahead and start up TX/RX threads
+	 * now to avoid potential resource allocation failures after the
+	 * final login response has been sent.
+	 */
+	if (login->login_complete) {
+		int rc = iscsit_start_kthreads(conn);
+		if (rc) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					    ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
+	}
+
+	if (conn->conn_transport->iscsit_put_login_tx(conn, login,
+					login->rsp_length + padding) < 0)
+		goto err;
+
+	login->rsp_length		= 0;
+
+	return 0;
+
+err:
+	if (login->login_complete) {
+		if (conn->rx_thread && conn->rx_thread_active) {
+			send_sig(SIGINT, conn->rx_thread, 1);
+			complete(&conn->rx_login_comp);
+			kthread_stop(conn->rx_thread);
+		}
+		if (conn->tx_thread && conn->tx_thread_active) {
+			send_sig(SIGINT, conn->tx_thread, 1);
+			kthread_stop(conn->tx_thread);
+		}
+		spin_lock(&iscsit_global->ts_bitmap_lock);
+		bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+				      get_order(1));
+		spin_unlock(&iscsit_global->ts_bitmap_lock);
+	}
+	return -1;
+}
+
+static void iscsi_target_sk_data_ready(struct sock *sk)
+{
+	struct iscsi_conn *conn = sk->sk_user_data;
+	bool rc;
+
+	pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
+
+	write_lock_bh(&sk->sk_callback_lock);
+	if (!sk->sk_user_data) {
+		write_unlock_bh(&sk->sk_callback_lock);
+		return;
+	}
+	if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
+		write_unlock_bh(&sk->sk_callback_lock);
+		pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn);
+		return;
+	}
+	if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+		write_unlock_bh(&sk->sk_callback_lock);
+		pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn);
+		return;
+	}
+	if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+		write_unlock_bh(&sk->sk_callback_lock);
+		pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
+		return;
+	}
+
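+	/*
+	 * LOGIN_FLAGS_READ_ACTIVE (set above) serializes login_work so only
+	 * one instance reads login PDUs at a time; it is cleared again via
+	 * iscsi_target_sk_check_and_clear() once a negotiation pass ends.
+	 * schedule_delayed_work() returns false if login_work was already
+	 * queued, in which case the queued instance will observe the new
+	 * data, so the condition below is only logged for debugging.
+	 */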
+	rc = schedule_delayed_work(&conn->login_work, 0);
+	if (!rc) {
+		pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
+			 " got false\n");
+	}
+	write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void iscsi_target_sk_state_change(struct sock *);
+
+static void iscsi_target_set_sock_callbacks(struct iscsi_conn *conn)
+{
+	struct sock *sk;
+
+	if (!conn->sock)
+		return;
+
+	sk = conn->sock->sk;
+	pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn);
+
+	write_lock_bh(&sk->sk_callback_lock);
+	sk->sk_user_data = conn;
+	conn->orig_data_ready = sk->sk_data_ready;
+	conn->orig_state_change = sk->sk_state_change;
+	sk->sk_data_ready = iscsi_target_sk_data_ready;
+	sk->sk_state_change = iscsi_target_sk_state_change;
+	write_unlock_bh(&sk->sk_callback_lock);
+
+	sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ;
+	sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ;
+}
+
+static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
+{
+	struct sock *sk;
+
+	if (!conn->sock)
+		return;
+
+	sk = conn->sock->sk;
+	pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn);
+
+	write_lock_bh(&sk->sk_callback_lock);
+	if (!sk->sk_user_data) {
+		write_unlock_bh(&sk->sk_callback_lock);
+		return;
+	}
+	sk->sk_user_data = NULL;
+	sk->sk_data_ready = conn->orig_data_ready;
+	sk->sk_state_change = conn->orig_state_change;
+	write_unlock_bh(&sk->sk_callback_lock);
+
+	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+}
+
+static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
+
+static bool __iscsi_target_sk_check_close(struct sock *sk)
+{
+	if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
+		pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
+			" returning TRUE\n");
+		return true;
+	}
+	return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = test_bit(flag, &conn->login_flags);
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		write_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		if (!state)
+			clear_bit(flag, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	struct iscsi_np *np = login->np;
+	bool zero_tsih = login->zero_tsih;
+
+	iscsi_remove_failed_auth_entry(conn);
+	iscsi_target_nego_release(conn);
+	iscsi_target_login_sess_out(conn, np, zero_tsih, true);
+}
+
+static void iscsi_target_login_timeout(unsigned long data)
+{
+	struct iscsi_conn *conn = (struct iscsi_conn *)data;
+
+	pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
+
+	if (conn->login_kworker) {
+		pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
+			 conn->login_kworker->comm, conn->login_kworker->pid);
+		send_sig(SIGINT, conn->login_kworker, 1);
+	}
+}
+
+static void iscsi_target_do_login_rx(struct work_struct *work)
+{
+	struct iscsi_conn *conn = container_of(work,
+				struct iscsi_conn, login_work.work);
+	struct iscsi_login *login = conn->login;
+	struct iscsi_np *np = login->np;
+	struct iscsi_portal_group *tpg = conn->tpg;
+	struct iscsi_tpg_np *tpg_np = conn->tpg_np;
+	struct timer_list login_timer;
+	int rc, zero_tsih = login->zero_tsih;
+	bool state;
+
+	pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
+			conn, current->comm, current->pid);
+	/*
+	 * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+	 * before initial PDU processing in iscsi_target_start_negotiation()
+	 * has completed, go ahead and retry until it's cleared.
+	 *
+	 * Otherwise, if the TCP connection drops while this is occurring,
+	 * iscsi_target_start_negotiation() will detect the failure, call
+	 * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+	 * remaining iscsi connection resources from iscsi_np process context.
+	 */
+	if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+		schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+		return;
+	}
+
+	spin_lock(&tpg->tpg_state_lock);
+	state = (tpg->tpg_state == TPG_STATE_ACTIVE);
+	spin_unlock(&tpg->tpg_state_lock);
+
+	if (!state) {
+		pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
+		goto err;
+	}
+
+	if (iscsi_target_sk_check_close(conn)) {
+		pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+		goto err;
+	}
+
+	conn->login_kworker = current;
+	allow_signal(SIGINT);
+
+	init_timer(&login_timer);
+	login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
+	login_timer.data = (unsigned long)conn;
+	login_timer.function = iscsi_target_login_timeout;
+	add_timer(&login_timer);
+	pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid);
+
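+	/*
+	 * If the initiator goes quiet, the on-stack timer fires
+	 * iscsi_target_login_timeout(), which SIGINTs this kworker so
+	 * that the blocking login RX below returns early instead of
+	 * sleeping past TA_LOGIN_TIMEOUT.
+	 */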
+	rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
+	del_timer_sync(&login_timer);
+	flush_signals(current);
+	conn->login_kworker = NULL;
+
+	if (rc < 0)
+		goto err;
+
+	pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
+			conn, current->comm, current->pid);
+
+	rc = iscsi_target_do_login(conn, login);
+	if (rc < 0) {
+		goto err;
+	} else if (!rc) {
+		if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+			goto err;
+	} else if (rc == 1) {
+		iscsi_target_nego_release(conn);
+		iscsi_post_login_handler(np, conn, zero_tsih);
+		iscsit_deaccess_np(np, tpg, tpg_np);
+	}
+	return;
+
+err:
+	iscsi_target_restore_sock_callbacks(conn);
+	iscsi_target_login_drop(conn, login);
+	iscsit_deaccess_np(np, tpg, tpg_np);
+}
+
+static void iscsi_target_do_cleanup(struct work_struct *work)
+{
+	struct iscsi_conn *conn = container_of(work,
+				struct iscsi_conn, login_cleanup_work.work);
+	struct sock *sk = conn->sock->sk;
+	struct iscsi_login *login = conn->login;
+	struct iscsi_np *np = login->np;
+	struct iscsi_portal_group *tpg = conn->tpg;
+	struct iscsi_tpg_np *tpg_np = conn->tpg_np;
+
+	pr_debug("Entering iscsi_target_do_cleanup\n");
+
+	cancel_delayed_work_sync(&conn->login_work);
+	conn->orig_state_change(sk);
+
+	iscsi_target_restore_sock_callbacks(conn);
+	iscsi_target_login_drop(conn, login);
+	iscsit_deaccess_np(np, tpg, tpg_np);
+
+	pr_debug("Leaving iscsi_target_do_cleanup\n");
+}
+
+static void iscsi_target_sk_state_change(struct sock *sk)
+{
+	struct iscsi_conn *conn;
+	void (*orig_state_change)(struct sock *);
+	bool state;
+
+	pr_debug("Entering iscsi_target_sk_state_change\n");
+
+	write_lock_bh(&sk->sk_callback_lock);
+	conn = sk->sk_user_data;
+	if (!conn) {
+		write_unlock_bh(&sk->sk_callback_lock);
+		return;
+	}
+	orig_state_change = conn->orig_state_change;
+
+	if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
+		pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n",
+			 conn);
+		write_unlock_bh(&sk->sk_callback_lock);
+		orig_state_change(sk);
+		return;
+	}
+	state = __iscsi_target_sk_check_close(sk);
+	pr_debug("__iscsi_target_sk_check_close: state: %d\n", state);
+
+	if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+		pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
+			 " conn: %p\n", conn);
+		if (state)
+			set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+		orig_state_change(sk);
+		return;
+	}
+	if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+		pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
+			 conn);
+		write_unlock_bh(&sk->sk_callback_lock);
+		orig_state_change(sk);
+		return;
+	}
+	/*
+	 * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+	 * but only queue conn->login_work -> iscsi_target_do_login_rx()
+	 * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+	 *
+	 * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+	 * will detect the dropped TCP connection from delayed workqueue context.
+	 *
+	 * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+	 * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+	 * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+	 * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+	 * dropped TCP connection in iscsi_np process context, and cleaning up
+	 * the remaining iscsi connection resources.
+	 */
+	if (state) {
+		pr_debug("iscsi_target_sk_state_change got failed state\n");
+		set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+		state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+
+		orig_state_change(sk);
+
+		if (!state)
+			schedule_delayed_work(&conn->login_work, 0);
+		return;
+	}
+	write_unlock_bh(&sk->sk_callback_lock);
+
+	orig_state_change(sk);
+}
+
+/*
+ *	NOTE: We check for existing sessions or connections AFTER the initiator
+ *	has been successfully authenticated in order to protect against faked
+ *	ISID/TSIH combinations.
+ */
+static int iscsi_target_check_for_existing_instances(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	if (login->checked_for_existing)
+		return 0;
+
+	login->checked_for_existing = 1;
+
+	if (!login->tsih)
+		return iscsi_check_for_session_reinstatement(conn);
+	else
+		return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
+				login->initial_exp_statsn);
+}
+
+static int iscsi_target_do_authentication(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	int authret;
+	u32 payload_length;
+	struct iscsi_param *param;
+	struct iscsi_login_req *login_req;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+	payload_length = ntoh24(login_req->dlength);
+
+	param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+	if (!param)
+		return -1;
+
+	authret = iscsi_handle_authentication(
+			conn,
+			login->req_buf,
+			login->rsp_buf,
+			payload_length,
+			&login->rsp_length,
+			param->value);
+	switch (authret) {
+	case 0:
+		pr_debug("Received OK response"
+		" from LIO Authentication, continuing.\n");
+		break;
+	case 1:
+		pr_debug("iSCSI security negotiation"
+			" completed successfully.\n");
+		login->auth_complete = 1;
+		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+			login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+					     ISCSI_FLAG_LOGIN_TRANSIT);
+			login->current_stage = 1;
+		}
+		return iscsi_target_check_for_existing_instances(
+				conn, login);
+	case 2:
+		pr_err("Security negotiation"
+			" failed.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_AUTH_FAILED);
+		return -1;
+	default:
+		pr_err("Received unknown error %d from LIO"
+				" Authentication\n", authret);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_TARGET_ERROR);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iscsi_target_handle_csg_zero(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	int ret;
+	u32 payload_length;
+	struct iscsi_param *param;
+	struct iscsi_login_req *login_req;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+	payload_length = ntoh24(login_req->dlength);
+
+	param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+	if (!param)
+		return -1;
+
+	ret = iscsi_decode_text_input(
+			PHASE_SECURITY|PHASE_DECLARATIVE,
+			SENDER_INITIATOR|SENDER_RECEIVER,
+			login->req_buf,
+			payload_length,
+			conn);
+	if (ret < 0)
+		return -1;
+
+	if (ret > 0) {
+		if (login->auth_complete) {
+			pr_err("Initiator has already been"
+				" successfully authenticated, but is still"
+				" sending %s keys.\n", param->value);
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+					ISCSI_LOGIN_STATUS_INIT_ERR);
+			return -1;
+		}
+
+		goto do_auth;
+	} else if (!payload_length) {
+		pr_err("Initiator sent zero length security payload,"
+		       " login failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				    ISCSI_LOGIN_STATUS_AUTH_FAILED);
+		return -1;
+	}
+
+	if (login->first_request)
+		if (iscsi_target_check_first_request(conn, login) < 0)
+			return -1;
+
+	ret = iscsi_encode_text_output(
+			PHASE_SECURITY|PHASE_DECLARATIVE,
+			SENDER_TARGET,
+			login->rsp_buf,
+			&login->rsp_length,
+			conn->param_list,
+			conn->tpg->tpg_attrib.login_keys_workaround);
+	if (ret < 0)
+		return -1;
+
+	if (!iscsi_check_negotiated_keys(conn->param_list)) {
+		if (conn->tpg->tpg_attrib.authentication &&
+		    !strncmp(param->value, NONE, 4)) {
+			pr_err("Initiator sent AuthMethod=None but"
+				" Target is enforcing iSCSI Authentication,"
+					" login failed.\n");
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+					ISCSI_LOGIN_STATUS_AUTH_FAILED);
+			return -1;
+		}
+
+		if (conn->tpg->tpg_attrib.authentication &&
+		    !login->auth_complete)
+			return 0;
+
+		if (strncmp(param->value, NONE, 4) && !login->auth_complete)
+			return 0;
+
+		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+			login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+					    ISCSI_FLAG_LOGIN_TRANSIT;
+			login->current_stage = 1;
+		}
+	}
+
+	return 0;
+do_auth:
+	return iscsi_target_do_authentication(conn, login);
+}
+
+static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	int ret;
+	u32 payload_length;
+	struct iscsi_login_req *login_req;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+	payload_length = ntoh24(login_req->dlength);
+
+	ret = iscsi_decode_text_input(
+			PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+			SENDER_INITIATOR|SENDER_RECEIVER,
+			login->req_buf,
+			payload_length,
+			conn);
+	if (ret < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (login->first_request)
+		if (iscsi_target_check_first_request(conn, login) < 0)
+			return -1;
+
+	if (iscsi_target_check_for_existing_instances(conn, login) < 0)
+		return -1;
+
+	ret = iscsi_encode_text_output(
+			PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+			SENDER_TARGET,
+			login->rsp_buf,
+			&login->rsp_length,
+			conn->param_list,
+			conn->tpg->tpg_attrib.login_keys_workaround);
+	if (ret < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (!login->auth_complete &&
+	     conn->tpg->tpg_attrib.authentication) {
+		pr_err("Initiator is requesting CSG: 1, has not been"
+			 " successfully authenticated, and the Target is"
+			" enforcing iSCSI Authentication, login failed.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_AUTH_FAILED);
+		return -1;
+	}
+
+	if (!iscsi_check_negotiated_keys(conn->param_list))
+		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
+		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
+			login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
+					    ISCSI_FLAG_LOGIN_TRANSIT;
+
+	return 0;
+}
+
+static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	int pdu_count = 0;
+	struct iscsi_login_req *login_req;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
+	while (1) {
+		if (++pdu_count > MAX_LOGIN_PDUS) {
+			pr_err("MAX_LOGIN_PDUS count reached.\n");
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					ISCSI_LOGIN_STATUS_TARGET_ERROR);
+			return -1;
+		}
+
+		switch (ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)) {
+		case 0:
+			login_rsp->flags &= ~ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK;
+			if (iscsi_target_handle_csg_zero(conn, login) < 0)
+				return -1;
+			break;
+		case 1:
+			login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
+			if (iscsi_target_handle_csg_one(conn, login) < 0)
+				return -1;
+			if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+				/*
+				 * Check to make sure the TCP connection has not
+				 * dropped asynchronously while session reinstatement
+				 * was occurring in this kthread context, before
+				 * transitioning to full feature phase operation.
+				 */
+				if (iscsi_target_sk_check_close(conn))
+					return -1;
+
+				login->tsih = conn->sess->tsih;
+				login->login_complete = 1;
+				iscsi_target_restore_sock_callbacks(conn);
+				if (iscsi_target_do_tx_login_io(conn,
+						login) < 0)
+					return -1;
+				return 1;
+			}
+			break;
+		default:
+			pr_err("Illegal CSG: %d received from"
+				" Initiator, protocol error.\n",
+				ISCSI_LOGIN_CURRENT_STAGE(login_req->flags));
+			break;
+		}
+
+		if (iscsi_target_do_tx_login_io(conn, login) < 0)
+			return -1;
+
+		if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+			login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
+			login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static void iscsi_initiatorname_tolower(
+	char *param_buf)
+{
+	char *c;
+	u32 iqn_size = strlen(param_buf), i;
+
+	for (i = 0; i < iqn_size; i++) {
+		c = &param_buf[i];
+		if (!isupper(*c))
+			continue;
+
+		*c = tolower(*c);
+	}
+}
+
+/*
+ * Processes the first Login Request..
+ */
+int iscsi_target_locate_portal(
+	struct iscsi_np *np,
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
+	char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_tiqn *tiqn;
+	struct iscsi_tpg_np *tpg_np = NULL;
+	struct iscsi_login_req *login_req;
+	struct se_node_acl *se_nacl;
+	u32 payload_length, queue_depth = 0;
+	int sessiontype = 0, ret = 0, tag_num, tag_size;
+
+	INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
+	INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup);
+	iscsi_target_set_sock_callbacks(conn);
+
+	login->np = np;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	payload_length = ntoh24(login_req->dlength);
+
+	tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
+	if (!tmpbuf) {
+		pr_err("Unable to allocate memory for tmpbuf.\n");
+		return -1;
+	}
+
+	memcpy(tmpbuf, login->req_buf, payload_length);
+	tmpbuf[payload_length] = '\0';
+	start = tmpbuf;
+	end = (start + payload_length);
+
+	/*
+	 * Locate the initial keys expected from the Initiator node in
+	 * the first login request in order to progress with the login phase.
+	 */
+	while (start < end) {
+		if (iscsi_extract_key_value(start, &key, &value) < 0) {
+			ret = -1;
+			goto out;
+		}
+
+		if (!strncmp(key, "InitiatorName", 13))
+			i_buf = value;
+		else if (!strncmp(key, "SessionType", 11))
+			s_buf = value;
+		else if (!strncmp(key, "TargetName", 10))
+			t_buf = value;
+
+		start += strlen(key) + strlen(value) + 2;
+	}
+	/*
+	 * See 5.3.  Login Phase.
+	 */
+	if (!i_buf) {
+		pr_err("InitiatorName key not received"
+			" in first login request.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+		ret = -1;
+		goto out;
+	}
+	/*
+	 * Convert the incoming InitiatorName to lowercase following
+	 * RFC 3720 section 3.2.6.1 c), which states that iSCSI IQNs
+	 * are NOT case sensitive.
+	 */
+	iscsi_initiatorname_tolower(i_buf);
+
+	if (!s_buf) {
+		if (!login->leading_connection)
+			goto get_target;
+
+		pr_err("SessionType key not received"
+			" in first login request.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+		ret = -1;
+		goto out;
+	}
+
+	/*
+	 * Use default portal group for discovery sessions.
+	 */
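+	/* strncmp() returns zero on match, so !sessiontype means Discovery. */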
+	sessiontype = strncmp(s_buf, DISCOVERY, 9);
+	if (!sessiontype) {
+		conn->tpg = iscsit_global->discovery_tpg;
+		if (!login->leading_connection)
+			goto get_target;
+
+		sess->sess_ops->SessionType = 1;
+		/*
+		 * Setup crc32c modules from libcrypto
+		 */
+		if (iscsi_login_setup_crypto(conn) < 0) {
+			pr_err("iscsi_login_setup_crypto() failed\n");
+			ret = -1;
+			goto out;
+		}
+		/*
+		 * Serialize access across the discovery struct iscsi_portal_group to
+		 * process login attempt.
+		 */
+		if (iscsit_access_np(np, conn->tpg) < 0) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+			ret = -1;
+			goto out;
+		}
+		ret = 0;
+		goto alloc_tags;
+	}
+
+get_target:
+	if (!t_buf) {
+		pr_err("TargetName key not received"
+			" in first login request while"
+			" SessionType=Normal.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+		ret = -1;
+		goto out;
+	}
+
+	/*
+	 * Locate Target IQN from Storage Node.
+	 */
+	tiqn = iscsit_get_tiqn_for_login(t_buf);
+	if (!tiqn) {
+		pr_err("Unable to locate Target IQN: %s in"
+			" Storage Node\n", t_buf);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		ret = -1;
+		goto out;
+	}
+	pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
+
+	/*
+	 * Locate Target Portal Group from Storage Node.
+	 */
+	conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np);
+	if (!conn->tpg) {
+		pr_err("Unable to locate Target Portal Group"
+				" on %s\n", tiqn->tiqn);
+		iscsit_put_tiqn_for_login(tiqn);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		ret = -1;
+		goto out;
+	}
+	conn->tpg_np = tpg_np;
+	pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
+	/*
+	 * Setup crc32c modules from libcrypto
+	 */
+	if (iscsi_login_setup_crypto(conn) < 0) {
+		pr_err("iscsi_login_setup_crypto() failed\n");
+		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+		iscsit_put_tiqn_for_login(tiqn);
+		conn->tpg = NULL;
+		ret = -1;
+		goto out;
+	}
+	/*
+	 * Serialize access across the struct iscsi_portal_group to
+	 * process login attempt.
+	 */
+	if (iscsit_access_np(np, conn->tpg) < 0) {
+		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+		iscsit_put_tiqn_for_login(tiqn);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		conn->tpg = NULL;
+		ret = -1;
+		goto out;
+	}
+
+	/*
+	 * conn->sess->node_acl will be set when the referenced
+	 * struct iscsi_session is located from received ISID+TSIH in
+	 * iscsi_login_non_zero_tsih_s2().
+	 */
+	if (!login->leading_connection) {
+		ret = 0;
+		goto out;
+	}
+
+	/*
+	 * This value is required in iscsi_login_zero_tsih_s2()
+	 */
+	sess->sess_ops->SessionType = 0;
+
+	/*
+	 * Locate incoming Initiator IQN reference from Storage Node.
+	 */
+	sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+			&conn->tpg->tpg_se_tpg, i_buf);
+	if (!sess->se_sess->se_node_acl) {
+		pr_err("iSCSI Initiator Node: %s is not authorized to"
+			" access iSCSI target portal group: %hu.\n",
+				i_buf, conn->tpg->tpgt);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
+		ret = -1;
+		goto out;
+	}
+	se_nacl = sess->se_sess->se_node_acl;
+	queue_depth = se_nacl->queue_depth;
+	/*
+	 * Setup pre-allocated tags based upon allowed per NodeACL CmdSN
+	 * depth for non immediate commands, plus extra tags for immediate
+	 * commands.
+	 *
+	 * Also enforce an ISCSIT_MIN_TAGS floor to prevent unnecessary
+	 * contention in the per-cpu-ida tag allocation logic when
+	 * queue_depth is small.
+	 */
+alloc_tags:
+	tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
+	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
+	tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
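+	/*
+	 * Example: a NodeACL queue_depth of 64 yields
+	 * tag_num = (64 * 2) + ISCSIT_EXTRA_TAGS pre-allocated tags, each
+	 * of tag_size bytes covering struct iscsi_cmd plus the transport's
+	 * private per-command area.
+	 */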
+
+	ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
+	if (ret < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				    ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		ret = -1;
+	}
+out:
+	kfree(tmpbuf);
+	return ret;
+}
+
+int iscsi_target_start_negotiation(
+	struct iscsi_login *login,
+	struct iscsi_conn *conn)
+{
+	int ret;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		write_lock_bh(&sk->sk_callback_lock);
+		set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+		set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	/*
+	 * If iscsi_target_do_login returns zero to signal more PDU
+	 * exchanges are required to complete the login, go ahead and
+	 * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+	 * is still active.
+	 *
+	 * Otherwise if TCP connection dropped asynchronously, go ahead
+	 * and perform connection cleanup now.
+	 */
+	ret = iscsi_target_do_login(conn, login);
+	if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+		ret = -1;
+
+	if (ret < 0) {
+		cancel_delayed_work_sync(&conn->login_work);
+		cancel_delayed_work_sync(&conn->login_cleanup_work);
+		iscsi_target_restore_sock_callbacks(conn);
+		iscsi_remove_failed_auth_entry(conn);
+	}
+	if (ret != 0)
+		iscsi_target_nego_release(conn);
+
+	return ret;
+}
+
+void iscsi_target_nego_release(struct iscsi_conn *conn)
+{
+	struct iscsi_login *login = conn->conn_login;
+
+	if (!login)
+		return;
+
+	kfree(login->req_buf);
+	kfree(login->rsp_buf);
+	kfree(login);
+
+	conn->conn_login = NULL;
+}
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
new file mode 100644
index 0000000..f021cbd
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -0,0 +1,20 @@
+#ifndef ISCSI_TARGET_NEGO_H
+#define ISCSI_TARGET_NEGO_H
+
+#define DECIMAL         0
+#define HEX             1
+
+extern void convert_null_to_semi(char *, int);
+extern int extract_param(const char *, const char *, unsigned int, char *,
+		unsigned char *);
+extern int iscsi_target_check_login_request(struct iscsi_conn *,
+		struct iscsi_login *);
+extern int iscsi_target_get_initial_payload(struct iscsi_conn *,
+		struct iscsi_login *);
+extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *,
+		struct iscsi_login *);
+extern int iscsi_target_start_negotiation(
+		struct iscsi_login *, struct iscsi_conn *);
+extern void iscsi_target_nego_release(struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
new file mode 100644
index 0000000..208cca8
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -0,0 +1,261 @@
+/*******************************************************************************
+ * This file contains the main functions related to Initiator Node Attributes.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_nodeattrib.h"
+
+static inline char *iscsit_na_get_initiatorname(
+	struct iscsi_node_acl *nacl)
+{
+	struct se_node_acl *se_nacl = &nacl->se_node_acl;
+
+	return &se_nacl->initiatorname[0];
+}
+
+void iscsit_set_default_node_attribues(
+	struct iscsi_node_acl *acl,
+	struct iscsi_portal_group *tpg)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	a->dataout_timeout = NA_DATAOUT_TIMEOUT;
+	a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
+	a->nopin_timeout = NA_NOPIN_TIMEOUT;
+	a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
+	a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
+	a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
+	a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
+	a->default_erl = tpg->tpg_attrib.default_erl;
+}
+
+int iscsit_na_dataout_timeout(
+	struct iscsi_node_acl *acl,
+	u32 dataout_timeout)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
+		pr_err("Requested DataOut Timeout %u larger than"
+			" maximum %u\n", dataout_timeout,
+			NA_DATAOUT_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
+		pr_err("Requested DataOut Timeout %u smaller than"
+			" minimum %u\n", dataout_timeout,
+			NA_DATAOUT_TIMEOUT_MIX);
+		return -EINVAL;
+	}
+
+	a->dataout_timeout = dataout_timeout;
+	pr_debug("Set DataOut Timeout to %u for Initiator Node"
+		" %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_dataout_timeout_retries(
+	struct iscsi_node_acl *acl,
+	u32 dataout_timeout_retries)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
+		pr_err("Requested DataOut Timeout Retries %u larger"
+			" than maximum %u", dataout_timeout_retries,
+				NA_DATAOUT_TIMEOUT_RETRIES_MAX);
+		return -EINVAL;
+	} else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
+		pr_err("Requested DataOut Timeout Retries %u smaller"
+			" than minimum %u", dataout_timeout_retries,
+				NA_DATAOUT_TIMEOUT_RETRIES_MIN);
+		return -EINVAL;
+	}
+
+	a->dataout_timeout_retries = dataout_timeout_retries;
+	pr_debug("Set DataOut Timeout Retries to %u for"
+		" Initiator Node %s\n", a->dataout_timeout_retries,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_nopin_timeout(
+	struct iscsi_node_acl *acl,
+	u32 nopin_timeout)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+	struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
+	struct se_session *se_sess;
+	u32 orig_nopin_timeout = a->nopin_timeout;
+
+	if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
+		pr_err("Requested NopIn Timeout %u larger than maximum"
+			" %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
+		   (nopin_timeout != 0)) {
+		pr_err("Requested NopIn Timeout %u smaller than"
+			" minimum %u and not 0\n", nopin_timeout,
+			NA_NOPIN_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->nopin_timeout = nopin_timeout;
+	pr_debug("Set NopIn Timeout to %u for Initiator"
+		" Node %s\n", a->nopin_timeout,
+		iscsit_na_get_initiatorname(acl));
+	/*
+	 * Reenable disabled nopin_timeout timer for all iSCSI connections.
+	 */
+	if (!orig_nopin_timeout) {
+		spin_lock_bh(&se_nacl->nacl_sess_lock);
+		se_sess = se_nacl->nacl_sess;
+		if (se_sess) {
+			sess = se_sess->fabric_sess_ptr;
+
+			spin_lock(&sess->conn_lock);
+			list_for_each_entry(conn, &sess->sess_conn_list,
+					conn_list) {
+				if (conn->conn_state !=
+						TARG_CONN_STATE_LOGGED_IN)
+					continue;
+
+				spin_lock(&conn->nopin_timer_lock);
+				__iscsit_start_nopin_timer(conn);
+				spin_unlock(&conn->nopin_timer_lock);
+			}
+			spin_unlock(&sess->conn_lock);
+		}
+		spin_unlock_bh(&se_nacl->nacl_sess_lock);
+	}
+
+	return 0;
+}
+
+int iscsit_na_nopin_response_timeout(
+	struct iscsi_node_acl *acl,
+	u32 nopin_response_timeout)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
+		pr_err("Requested NopIn Response Timeout %u larger"
+			" than maximum %u\n", nopin_response_timeout,
+				NA_NOPIN_RESPONSE_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
+		pr_err("Requested NopIn Response Timeout %u smaller"
+			" than minimum %u\n", nopin_response_timeout,
+				NA_NOPIN_RESPONSE_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->nopin_response_timeout = nopin_response_timeout;
+	pr_debug("Set NopIn Response Timeout to %u for"
+		" Initiator Node %s\n", a->nopin_timeout,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_random_datain_pdu_offsets(
+	struct iscsi_node_acl *acl,
+	u32 random_datain_pdu_offsets)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
+		pr_err("Requested Random DataIN PDU Offsets: %u not"
+			" 0 or 1\n", random_datain_pdu_offsets);
+		return -EINVAL;
+	}
+
+	a->random_datain_pdu_offsets = random_datain_pdu_offsets;
+	pr_debug("Set Random DataIN PDU Offsets to %u for"
+		" Initiator Node %s\n", a->random_datain_pdu_offsets,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_random_datain_seq_offsets(
+	struct iscsi_node_acl *acl,
+	u32 random_datain_seq_offsets)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
+		pr_err("Requested Random DataIN Sequence Offsets: %u"
+			" not 0 or 1\n", random_datain_seq_offsets);
+		return -EINVAL;
+	}
+
+	a->random_datain_seq_offsets = random_datain_seq_offsets;
+	pr_debug("Set Random DataIN Sequence Offsets to %u for"
+		" Initiator Node %s\n", a->random_datain_seq_offsets,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_random_r2t_offsets(
+	struct iscsi_node_acl *acl,
+	u32 random_r2t_offsets)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
+		pr_err("Requested Random R2T Offsets: %u not"
+			" 0 or 1\n", random_r2t_offsets);
+		return -EINVAL;
+	}
+
+	a->random_r2t_offsets = random_r2t_offsets;
+	pr_debug("Set Random R2T Offsets to %u for"
+		" Initiator Node %s\n", a->random_r2t_offsets,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_default_erl(
+	struct iscsi_node_acl *acl,
+	u32 default_erl)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
+		pr_err("Requested default ERL: %u not 0, 1, or 2\n",
+				default_erl);
+		return -EINVAL;
+	}
+
+	a->default_erl = default_erl;
+	pr_debug("Set use ERL0 flag to %u for Initiator"
+		" Node %s\n", a->default_erl,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
new file mode 100644
index 0000000..0c69a46
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -0,0 +1,15 @@
+#ifndef ISCSI_TARGET_NODEATTRIB_H
+#define ISCSI_TARGET_NODEATTRIB_H
+
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
+					      struct iscsi_portal_group *);
+extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
+
+#endif /* ISCSI_TARGET_NODEATTRIB_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
new file mode 100644
index 0000000..76bde76
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -0,0 +1,1727 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI Parameter negotiation.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_util.h"
+#include "iscsi_target_parameters.h"
+
+int iscsi_login_rx_data(
+	struct iscsi_conn *conn,
+	char *buf,
+	int length)
+{
+	int rx_got;
+	struct kvec iov;
+
+	memset(&iov, 0, sizeof(struct kvec));
+	iov.iov_len	= length;
+	iov.iov_base	= buf;
+
+	rx_got = rx_data(conn, &iov, 1, length);
+	if (rx_got != length) {
+		pr_err("rx_data returned %d, expecting %d.\n",
+				rx_got, length);
+		return -1;
+	}
+
+	return 0;
+}
+
+int iscsi_login_tx_data(
+	struct iscsi_conn *conn,
+	char *pdu_buf,
+	char *text_buf,
+	int text_length)
+{
+	int length, tx_sent, iov_cnt = 1;
+	struct kvec iov[2];
+
+	length = (ISCSI_HDR_LEN + text_length);
+
+	memset(&iov[0], 0, 2 * sizeof(struct kvec));
+	iov[0].iov_len		= ISCSI_HDR_LEN;
+	iov[0].iov_base		= pdu_buf;
+
+	if (text_buf && text_length) {
+		iov[1].iov_len	= text_length;
+		iov[1].iov_base	= text_buf;
+		iov_cnt++;
+	}
+
+	tx_sent = tx_data(conn, &iov[0], iov_cnt, length);
+	if (tx_sent != length) {
+		pr_err("tx_data returned %d, expecting %d.\n",
+				tx_sent, length);
+		return -1;
+	}
+
+	return 0;
+}
+
+void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
+{
+	pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
+				"CRC32C" : "None");
+	pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
+				"CRC32C" : "None");
+	pr_debug("MaxRecvDataSegmentLength: %u\n",
+				conn_ops->MaxRecvDataSegmentLength);
+}
+
+void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
+{
+	pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
+	pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
+	pr_debug("TargetName: %s\n", sess_ops->TargetName);
+	pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
+	pr_debug("TargetPortalGroupTag: %hu\n",
+			sess_ops->TargetPortalGroupTag);
+	pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
+	pr_debug("InitialR2T: %s\n",
+			(sess_ops->InitialR2T) ? "Yes" : "No");
+	pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
+			"Yes" : "No");
+	pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
+	pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
+	pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
+	pr_debug("DefaultTime2Retain: %hu\n",
+			sess_ops->DefaultTime2Retain);
+	pr_debug("MaxOutstandingR2T: %hu\n",
+			sess_ops->MaxOutstandingR2T);
+	pr_debug("DataPDUInOrder: %s\n",
+			(sess_ops->DataPDUInOrder) ? "Yes" : "No");
+	pr_debug("DataSequenceInOrder: %s\n",
+			(sess_ops->DataSequenceInOrder) ? "Yes" : "No");
+	pr_debug("ErrorRecoveryLevel: %hu\n",
+			sess_ops->ErrorRecoveryLevel);
+	pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
+			"Discovery" : "Normal");
+}
+
+void iscsi_print_params(struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	list_for_each_entry(param, &param_list->param_list, p_list)
+		pr_debug("%s: %s\n", param->name, param->value);
+}
+
+static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
+		char *name, char *value, u8 phase, u8 scope, u8 sender,
+		u16 type_range, u8 use)
+{
+	struct iscsi_param *param = NULL;
+
+	param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+	if (!param) {
+		pr_err("Unable to allocate memory for parameter.\n");
+		goto out;
+	}
+	INIT_LIST_HEAD(&param->p_list);
+
+	param->name = kstrdup(name, GFP_KERNEL);
+	if (!param->name) {
+		pr_err("Unable to allocate memory for parameter name.\n");
+		goto out;
+	}
+
+	param->value = kstrdup(value, GFP_KERNEL);
+	if (!param->value) {
+		pr_err("Unable to allocate memory for parameter value.\n");
+		goto out;
+	}
+
+	param->phase		= phase;
+	param->scope		= scope;
+	param->sender		= sender;
+	param->use		= use;
+	param->type_range	= type_range;
+
+	switch (param->type_range) {
+	case TYPERANGE_BOOL_AND:
+		param->type = TYPE_BOOL_AND;
+		break;
+	case TYPERANGE_BOOL_OR:
+		param->type = TYPE_BOOL_OR;
+		break;
+	case TYPERANGE_0_TO_2:
+	case TYPERANGE_0_TO_3600:
+	case TYPERANGE_0_TO_32767:
+	case TYPERANGE_0_TO_65535:
+	case TYPERANGE_1_TO_65535:
+	case TYPERANGE_2_TO_3600:
+	case TYPERANGE_512_TO_16777215:
+		param->type = TYPE_NUMBER;
+		break;
+	case TYPERANGE_AUTH:
+	case TYPERANGE_DIGEST:
+		param->type = TYPE_VALUE_LIST | TYPE_STRING;
+		break;
+	case TYPERANGE_ISCSINAME:
+	case TYPERANGE_SESSIONTYPE:
+	case TYPERANGE_TARGETADDRESS:
+	case TYPERANGE_UTF8:
+		param->type = TYPE_STRING;
+		break;
+	default:
+		pr_err("Unknown type_range 0x%02x\n",
+				param->type_range);
+		goto out;
+	}
+	list_add_tail(&param->p_list, &param_list->param_list);
+
+	return param;
+out:
+	if (param) {
+		kfree(param->value);
+		kfree(param->name);
+		kfree(param);
+	}
+
+	return NULL;
+}
+
+/* #warning Add extension keys */
+int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
+{
+	struct iscsi_param *param = NULL;
+	struct iscsi_param_list *pl;
+
+	pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+	if (!pl) {
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_param_list.\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&pl->param_list);
+	INIT_LIST_HEAD(&pl->extra_response_list);
+
+	/*
+	 * The format for setting the initial parameter definitions is:
+	 *
+	 * Parameter name:
+	 * Initial value:
+	 * Allowable phase:
+	 * Scope:
+	 * Allowable senders:
+	 * Typerange:
+	 * Use:
+	 */
+	param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
+			PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_AUTH, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXCONNECTIONS,
+			INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
+			SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
+			PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+			TYPERANGE_UTF8, 0);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
+			PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_ISCSINAME, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, INITIATORNAME,
+			INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
+			SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+			TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
+			PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+			TYPERANGE_UTF8, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, INITIATORALIAS,
+			INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
+			SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
+			USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETADDRESS,
+			INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
+			SCOPE_SESSION_WIDE, SENDER_TARGET,
+			TYPERANGE_TARGETADDRESS, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
+			INITIAL_TARGETPORTALGROUPTAG,
+			PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+			TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, IMMEDIATEDATA,
+			INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
+			SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
+			USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXXMITDATASEGMENTLENGTH,
+			INITIAL_MAXXMITDATASEGMENTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
+			INITIAL_MAXRECVDATASEGMENTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
+			INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
+			SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH,
+			INITIAL_FIRSTBURSTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT,
+			INITIAL_DEFAULTTIME2WAIT,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN,
+			INITIAL_DEFAULTTIME2RETAIN,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T,
+			INITIAL_MAXOUTSTANDINGR2T,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DATAPDUINORDER,
+			INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL,
+			SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR,
+			USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DATASEQUENCEINORDER,
+			INITIAL_DATASEQUENCEINORDER,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL,
+			INITIAL_ERRORRECOVERYLEVEL,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_0_TO_2, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE,
+			PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+			TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_UTF8, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_UTF8, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	/*
+	 * Extra parameters for iSER from RFC-5046
+	 */
+	param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH,
+			INITIAL_INITIATORRECVDATASEGMENTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH,
+			INITIAL_TARGETRECVDATASEGMENTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_ALL);
+	if (!param)
+		goto out;
+
+	*param_list_ptr = pl;
+	return 0;
+out:
+	iscsi_release_param_list(pl);
+	return -1;
+}
+
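+/*
+ * Mark the keys this side will actively propose.  HeaderDigest,
+ * DataDigest and MaxRecvDataSegmentLength are only negotiated for
+ * traditional iSCSI; over iSER the RFC-5046 RDMAExtensions and
+ * *RecvDataSegmentLength keys take their place.  The obsolete
+ * [IO]FMarker/[IO]FMarkInt marker keys are never proposed and are
+ * pre-marked for rejection.
+ */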
+int iscsi_set_keys_to_negotiate(
+	struct iscsi_param_list *param_list,
+	bool iser)
+{
+	struct iscsi_param *param;
+
+	param_list->iser = iser;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		param->state = 0;
+		if (!strcmp(param->name, AUTHMETHOD)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, HEADERDIGEST)) {
+			if (!iser)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DATADIGEST)) {
+			if (!iser)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, MAXCONNECTIONS)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, TARGETNAME)) {
+			continue;
+		} else if (!strcmp(param->name, INITIATORNAME)) {
+			continue;
+		} else if (!strcmp(param->name, TARGETALIAS)) {
+			if (param->value)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, INITIATORALIAS)) {
+			continue;
+		} else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, INITIALR2T)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, IMMEDIATEDATA)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+			if (!iser)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
+			continue;
+		} else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DATAPDUINORDER)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, SESSIONTYPE)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, IFMARKER)) {
+			SET_PSTATE_REJECT(param);
+		} else if (!strcmp(param->name, OFMARKER)) {
+			SET_PSTATE_REJECT(param);
+		} else if (!strcmp(param->name, IFMARKINT)) {
+			SET_PSTATE_REJECT(param);
+		} else if (!strcmp(param->name, OFMARKINT)) {
+			SET_PSTATE_REJECT(param);
+		} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
+			if (iser)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
+			if (iser)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
+			if (iser)
+				SET_PSTATE_NEGOTIATE(param);
+		}
+	}
+
+	return 0;
+}
+
+int iscsi_set_keys_irrelevant_for_discovery(
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!strcmp(param->name, MAXCONNECTIONS))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, INITIALR2T))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, IMMEDIATEDATA))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, MAXBURSTLENGTH))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, FIRSTBURSTLENGTH))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, MAXOUTSTANDINGR2T))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, DATAPDUINORDER))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, DATASEQUENCEINORDER))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, DEFAULTTIME2WAIT))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, IFMARKER))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, OFMARKER))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, IFMARKINT))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, OFMARKINT))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, RDMAEXTENSIONS))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH))
+			param->state &= ~PSTATE_NEGOTIATE;
+	}
+
+	return 0;
+}
+
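+/*
+ * Build a per-connection working copy of the default parameter list.
+ * For a non-leading connection only connection-scoped keys are copied,
+ * plus TargetName, InitiatorName and TargetPortalGroupTag, which the
+ * login code still needs in order to tie the new connection to its
+ * existing session.
+ */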
+int iscsi_copy_param_list(
+	struct iscsi_param_list **dst_param_list,
+	struct iscsi_param_list *src_param_list,
+	int leading)
+{
+	struct iscsi_param *param = NULL;
+	struct iscsi_param *new_param = NULL;
+	struct iscsi_param_list *param_list = NULL;
+
+	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+	if (!param_list) {
+		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&param_list->param_list);
+	INIT_LIST_HEAD(&param_list->extra_response_list);
+
+	list_for_each_entry(param, &src_param_list->param_list, p_list) {
+		if (!leading && (param->scope & SCOPE_SESSION_WIDE)) {
+			if ((strcmp(param->name, "TargetName") != 0) &&
+			    (strcmp(param->name, "InitiatorName") != 0) &&
+			    (strcmp(param->name, "TargetPortalGroupTag") != 0))
+				continue;
+		}
+
+		new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+		if (!new_param) {
+			pr_err("Unable to allocate memory for struct iscsi_param.\n");
+			goto err_out;
+		}
+
+		new_param->name = kstrdup(param->name, GFP_KERNEL);
+		new_param->value = kstrdup(param->value, GFP_KERNEL);
+		if (!new_param->value || !new_param->name) {
+			kfree(new_param->value);
+			kfree(new_param->name);
+			kfree(new_param);
+			pr_err("Unable to allocate memory for parameter name/value.\n");
+			goto err_out;
+		}
+
+		new_param->set_param = param->set_param;
+		new_param->phase = param->phase;
+		new_param->scope = param->scope;
+		new_param->sender = param->sender;
+		new_param->type = param->type;
+		new_param->use = param->use;
+		new_param->type_range = param->type_range;
+
+		list_add_tail(&new_param->p_list, &param_list->param_list);
+	}
+
+	if (!list_empty(&param_list->param_list)) {
+		*dst_param_list = param_list;
+	} else {
+		pr_err("No parameters allocated.\n");
+		goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	iscsi_release_param_list(param_list);
+	return -ENOMEM;
+}
+
+static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
+{
+	struct iscsi_extra_response *er, *er_tmp;
+
+	list_for_each_entry_safe(er, er_tmp, &param_list->extra_response_list,
+			er_list) {
+		list_del(&er->er_list);
+		kfree(er);
+	}
+}
+
+void iscsi_release_param_list(struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param, *param_tmp;
+
+	list_for_each_entry_safe(param, param_tmp, &param_list->param_list,
+			p_list) {
+		list_del(&param->p_list);
+
+		kfree(param->name);
+		kfree(param->value);
+		kfree(param);
+	}
+
+	iscsi_release_extra_responses(param_list);
+
+	kfree(param_list);
+}
+
+struct iscsi_param *iscsi_find_param_from_key(
+	char *key,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	if (!key || !param_list) {
+		pr_err("Key or parameter list pointer is NULL.\n");
+		return NULL;
+	}
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!strcmp(key, param->name))
+			return param;
+	}
+
+	pr_err("Unable to locate key \"%s\".\n", key);
+	return NULL;
+}
+
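+/*
+ * Split a single "key=value" pair in place: the '=' is overwritten with
+ * '\0', so on success *key points at the start of textbuf and *value at
+ * the byte just past the former '='.  For example, "InitialR2T=Yes"
+ * yields key -> "InitialR2T" and value -> "Yes".
+ */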
+int iscsi_extract_key_value(char *textbuf, char **key, char **value)
+{
+	*value = strchr(textbuf, '=');
+	if (!*value) {
+		pr_err("Unable to locate \"=\" separator for key,"
+				" ignoring request.\n");
+		return -1;
+	}
+
+	*key = textbuf;
+	**value = '\0';
+	*value = *value + 1;
+
+	return 0;
+}
+
+int iscsi_update_param_value(struct iscsi_param *param, char *value)
+{
+	kfree(param->value);
+
+	param->value = kstrdup(value, GFP_KERNEL);
+	if (!param->value) {
+		pr_err("Unable to allocate memory for value.\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("iSCSI Parameter updated to %s=%s\n",
+			param->name, param->value);
+	return 0;
+}
+
+static int iscsi_add_notunderstood_response(
+	char *key,
+	char *value,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_extra_response *extra_response;
+
+	if (strlen(value) > VALUE_MAXLEN) {
+		pr_err("Value for notunderstood key \"%s\" exceeds %d,"
+			" protocol error.\n", key, VALUE_MAXLEN);
+		return -1;
+	}
+
+	extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
+	if (!extra_response) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_extra_response.\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&extra_response->er_list);
+
+	strlcpy(extra_response->key, key, sizeof(extra_response->key));
+	strlcpy(extra_response->value, NOTUNDERSTOOD,
+		sizeof(extra_response->value));
+
+	list_add_tail(&extra_response->er_list,
+			&param_list->extra_response_list);
+	return 0;
+}
+
+static int iscsi_check_for_auth_key(char *key)
+{
+	/*
+	 * RFC 1994
+	 */
+	if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") ||
+	    !strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") ||
+	    !strcmp(key, "CHAP_R"))
+		return 1;
+
+	/*
+	 * RFC 2945
+	 */
+	if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") ||
+	    !strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") ||
+	    !strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") ||
+	    !strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM"))
+		return 1;
+
+	return 0;
+}
+
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
+						    bool keys_workaround)
+{
+	if (IS_TYPE_BOOL_AND(param)) {
+		if (!strcmp(param->value, NO))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+	} else if (IS_TYPE_BOOL_OR(param)) {
+		if (!strcmp(param->value, YES))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+
+		if (keys_workaround) {
+			/*
+			 * Required for gPXE iSCSI boot client
+			 */
+			if (!strcmp(param->name, IMMEDIATEDATA))
+				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
+	} else if (IS_TYPE_NUMBER(param)) {
+		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+
+		if (keys_workaround) {
+			/*
+			 * Required for Mellanox Flexboot PXE boot ROM
+			 */
+			if (!strcmp(param->name, FIRSTBURSTLENGTH))
+				SET_PSTATE_REPLY_OPTIONAL(param);
+
+			/*
+			 * Required for gPXE iSCSI boot client
+			 */
+			if (!strcmp(param->name, MAXCONNECTIONS))
+				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
+	} else if (IS_PHASE_DECLARATIVE(param))
+		SET_PSTATE_REPLY_OPTIONAL(param);
+}
+
+static int iscsi_check_boolean_value(struct iscsi_param *param, char *value)
+{
+	if (strcmp(value, YES) && strcmp(value, NO)) {
+		pr_err("Illegal value for \"%s\", must be either"
+			" \"%s\" or \"%s\".\n", param->name, YES, NO);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr)
+{
+	char *tmpptr;
+	int value = 0;
+
+	value = simple_strtoul(value_ptr, &tmpptr, 0);
+
+	if (IS_TYPERANGE_0_TO_2(param)) {
+		if ((value < 0) || (value > 2)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 0 and 2.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_0_TO_3600(param)) {
+		if ((value < 0) || (value > 3600)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 0 and 3600.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_0_TO_32767(param)) {
+		if ((value < 0) || (value > 32767)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 0 and 32767.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_0_TO_65535(param)) {
+		if ((value < 0) || (value > 65535)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 0 and 65535.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_1_TO_65535(param)) {
+		if ((value < 1) || (value > 65535)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 1 and 65535.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_2_TO_3600(param)) {
+		if ((value < 2) || (value > 3600)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 2 and 3600.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_512_TO_16777215(param)) {
+		if ((value < 512) || (value > 16777215)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 512 and 16777215.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+
+	return 0;
+}
+
+static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
+{
+	if (IS_PSTATE_PROPOSER(param))
+		return 0;
+
+	if (IS_TYPERANGE_AUTH_PARAM(param)) {
+		if (strcmp(value, KRB5) && strcmp(value, SPKM1) &&
+		    strcmp(value, SPKM2) && strcmp(value, SRP) &&
+		    strcmp(value, CHAP) && strcmp(value, NONE)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" \"%s\", \"%s\", \"%s\", \"%s\", \"%s\""
+				" or \"%s\".\n", param->name, KRB5,
+					SPKM1, SPKM2, SRP, CHAP, NONE);
+			return -1;
+		}
+	}
+	if (IS_TYPERANGE_DIGEST_PARAM(param)) {
+		if (strcmp(value, CRC32C) && strcmp(value, NONE)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" \"%s\" or \"%s\".\n", param->name,
+					CRC32C, NONE);
+			return -1;
+		}
+	}
+	if (IS_TYPERANGE_SESSIONTYPE(param)) {
+		if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" \"%s\" or \"%s\".\n", param->name,
+					DISCOVERY, NORMAL);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
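+/*
+ * Scan the proposer's comma-separated list in its stated order of
+ * preference and return a pointer to the first entry that also appears
+ * in the acceptor's list, or NULL if the lists do not intersect.  Commas
+ * are temporarily overwritten with '\0' for the strcmp() and restored
+ * afterwards.  For example, proposing "CRC32C,None" against an acceptor
+ * value of "None" selects "None".
+ */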
+static char *iscsi_check_valuelist_for_support(
+	struct iscsi_param *param,
+	char *value)
+{
+	char *tmp1 = NULL, *tmp2 = NULL;
+	char *acceptor_values = NULL, *proposer_values = NULL;
+
+	acceptor_values = param->value;
+	proposer_values = value;
+
+	do {
+		if (!proposer_values)
+			return NULL;
+		tmp1 = strchr(proposer_values, ',');
+		if (tmp1)
+			*tmp1 = '\0';
+		acceptor_values = param->value;
+		do {
+			if (!acceptor_values) {
+				if (tmp1)
+					*tmp1 = ',';
+				return NULL;
+			}
+			tmp2 = strchr(acceptor_values, ',');
+			if (tmp2)
+				*tmp2 = '\0';
+			if (!strcmp(acceptor_values, proposer_values)) {
+				if (tmp2)
+					*tmp2 = ',';
+				goto out;
+			}
+			if (tmp2)
+				*tmp2++ = ',';
+
+			acceptor_values = tmp2;
+		} while (acceptor_values);
+		if (tmp1)
+			*tmp1++ = ',';
+		proposer_values = tmp1;
+	} while (proposer_values);
+
+out:
+	return proposer_values;
+}
+
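+/*
+ * Fold a proposed value into the locally held one according to the key's
+ * result function from RFC 3720:
+ *
+ *   - boolean AND keys resolve to Yes only if both sides offered Yes;
+ *   - boolean OR keys resolve to Yes if either side offered Yes;
+ *   - most numerical keys take the minimum of the two values, while
+ *     DefaultTime2Wait takes the maximum;
+ *   - value-list keys take the first mutually supported entry.
+ */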
+static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
+				struct iscsi_conn *conn)
+{
+	u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
+	char *negotiated_value = NULL;
+
+	if (IS_PSTATE_ACCEPTOR(param)) {
+		pr_err("Received key \"%s\" twice, protocol error.\n",
+				param->name);
+		return -1;
+	}
+
+	if (IS_PSTATE_REJECT(param))
+		return 0;
+
+	if (IS_TYPE_BOOL_AND(param)) {
+		if (!strcmp(value, YES))
+			proposer_boolean_value = 1;
+		if (!strcmp(param->value, YES))
+			acceptor_boolean_value = 1;
+		if (!acceptor_boolean_value || !proposer_boolean_value) {
+			if (iscsi_update_param_value(param, NO) < 0)
+				return -1;
+			if (!proposer_boolean_value)
+				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
+	} else if (IS_TYPE_BOOL_OR(param)) {
+		if (!strcmp(value, YES))
+			proposer_boolean_value = 1;
+		if (!strcmp(param->value, YES))
+			acceptor_boolean_value = 1;
+		if (acceptor_boolean_value || proposer_boolean_value) {
+			if (iscsi_update_param_value(param, YES) < 0)
+				return -1;
+			if (proposer_boolean_value)
+				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
+	} else if (IS_TYPE_NUMBER(param)) {
+		char *tmpptr, buf[11];
+		u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
+		u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
+
+		memset(buf, 0, sizeof(buf));
+
+		if (!strcmp(param->name, MAXCONNECTIONS) ||
+		    !strcmp(param->name, MAXBURSTLENGTH) ||
+		    !strcmp(param->name, FIRSTBURSTLENGTH) ||
+		    !strcmp(param->name, MAXOUTSTANDINGR2T) ||
+		    !strcmp(param->name, DEFAULTTIME2RETAIN) ||
+		    !strcmp(param->name, ERRORRECOVERYLEVEL)) {
+			if (proposer_value > acceptor_value) {
+				sprintf(buf, "%u", acceptor_value);
+				if (iscsi_update_param_value(param,
+						&buf[0]) < 0)
+					return -1;
+			} else {
+				if (iscsi_update_param_value(param, value) < 0)
+					return -1;
+			}
+		} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+			if (acceptor_value > proposer_value) {
+				sprintf(buf, "%u", acceptor_value);
+				if (iscsi_update_param_value(param,
+						&buf[0]) < 0)
+					return -1;
+			} else {
+				if (iscsi_update_param_value(param, value) < 0)
+					return -1;
+			}
+		} else {
+			if (iscsi_update_param_value(param, value) < 0)
+				return -1;
+		}
+
+		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+			struct iscsi_param *param_mxdsl;
+			unsigned long long tmp;
+			int rc;
+
+			rc = kstrtoull(param->value, 0, &tmp);
+			if (rc < 0)
+				return -1;
+
+			conn->conn_ops->MaxRecvDataSegmentLength = tmp;
+			pr_debug("Saving op->MaxRecvDataSegmentLength from"
+				" original initiator received value: %u\n",
+				conn->conn_ops->MaxRecvDataSegmentLength);
+
+			param_mxdsl = iscsi_find_param_from_key(
+						MAXXMITDATASEGMENTLENGTH,
+						conn->param_list);
+			if (!param_mxdsl)
+				return -1;
+
+			rc = iscsi_update_param_value(param,
+						param_mxdsl->value);
+			if (rc < 0)
+				return -1;
+
+			pr_debug("Updated %s to target MXDSL value: %s\n",
+					param->name, param->value);
+		}
+	} else if (IS_TYPE_VALUE_LIST(param)) {
+		negotiated_value = iscsi_check_valuelist_for_support(
+					param, value);
+		if (!negotiated_value) {
+			pr_err("Proposer's value list \"%s\" contains"
+				" no valid values from Acceptor's value list"
+				" \"%s\".\n", value, param->value);
+			return -1;
+		}
+		if (iscsi_update_param_value(param, negotiated_value) < 0)
+			return -1;
+	} else if (IS_PHASE_DECLARATIVE(param)) {
+		if (iscsi_update_param_value(param, value) < 0)
+			return -1;
+		SET_PSTATE_REPLY_OPTIONAL(param);
+	}
+
+	return 0;
+}
+
+static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
+{
+	if (IS_PSTATE_RESPONSE_GOT(param)) {
+		pr_err("Received key \"%s\" twice, protocol error.\n",
+				param->name);
+		return -1;
+	}
+
+	if (IS_TYPE_VALUE_LIST(param)) {
+		char *comma_ptr = NULL, *tmp_ptr = NULL;
+
+		comma_ptr = strchr(value, ',');
+		if (comma_ptr) {
+			pr_err("Illegal \",\" in response for \"%s\".\n",
+					param->name);
+			return -1;
+		}
+
+		tmp_ptr = iscsi_check_valuelist_for_support(param, value);
+		if (!tmp_ptr)
+			return -1;
+	}
+
+	if (iscsi_update_param_value(param, value) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int iscsi_check_value(struct iscsi_param *param, char *value)
+{
+	char *comma_ptr = NULL;
+
+	if (!strcmp(value, REJECT)) {
+		if (!strcmp(param->name, IFMARKINT) ||
+		    !strcmp(param->name, OFMARKINT)) {
+			/*
+			 * Reject is not fatal for [I,O]FMarkInt, and causes
+			 * [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2)
+			 */
+			SET_PSTATE_REJECT(param);
+			return 0;
+		}
+		pr_err("Received %s=%s\n", param->name, value);
+		return -1;
+	}
+	if (!strcmp(value, IRRELEVANT)) {
+		pr_debug("Received %s=%s\n", param->name, value);
+		SET_PSTATE_IRRELEVANT(param);
+		return 0;
+	}
+	if (!strcmp(value, NOTUNDERSTOOD)) {
+		if (!IS_PSTATE_PROPOSER(param)) {
+			pr_err("Received illegal offer %s=%s\n",
+				param->name, value);
+			return -1;
+		}
+
+/* #warning FIXME: Add check for X-ExtensionKey here */
+		pr_err("Standard iSCSI key \"%s\" cannot be answered"
+			" with \"%s\", protocol error.\n", param->name, value);
+		return -1;
+	}
+
+	do {
+		comma_ptr = NULL;
+		comma_ptr = strchr(value, ',');
+
+		if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
+			pr_err("Detected value separator \",\", but"
+				" key \"%s\" does not allow a value list,"
+				" protocol error.\n", param->name);
+			return -1;
+		}
+		if (comma_ptr)
+			*comma_ptr = '\0';
+
+		if (strlen(value) > VALUE_MAXLEN) {
+			pr_err("Value for key \"%s\" exceeds %d,"
+				" protocol error.\n", param->name,
+				VALUE_MAXLEN);
+			return -1;
+		}
+
+		if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) {
+			if (iscsi_check_boolean_value(param, value) < 0)
+				return -1;
+		} else if (IS_TYPE_NUMBER(param)) {
+			if (iscsi_check_numerical_value(param, value) < 0)
+				return -1;
+		} else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
+			if (iscsi_check_string_or_list_value(param, value) < 0)
+				return -1;
+		} else {
+			pr_err("Huh? 0x%02x\n", param->type);
+			return -1;
+		}
+
+		if (comma_ptr)
+			*comma_ptr++ = ',';
+
+		value = comma_ptr;
+	} while (value);
+
+	return 0;
+}
+
+static struct iscsi_param *__iscsi_check_key(
+	char *key,
+	int sender,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	if (strlen(key) > KEY_MAXLEN) {
+		pr_err("Length of key name \"%s\" exceeds %d.\n",
+			key, KEY_MAXLEN);
+		return NULL;
+	}
+
+	param = iscsi_find_param_from_key(key, param_list);
+	if (!param)
+		return NULL;
+
+	if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+		pr_err("Key \"%s\" may not be sent to %s,"
+			" protocol error.\n", param->name,
+			(sender & SENDER_RECEIVER) ? "target" : "initiator");
+		return NULL;
+	}
+
+	if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+		pr_err("Key \"%s\" may not be sent to %s,"
+			" protocol error.\n", param->name,
+			(sender & SENDER_RECEIVER) ? "initiator" : "target");
+		return NULL;
+	}
+
+	return param;
+}
+
+static struct iscsi_param *iscsi_check_key(
+	char *key,
+	int phase,
+	int sender,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+	/*
+	 * Key name length must not exceed 63 bytes. (See iSCSI v20 5.1)
+	 */
+	if (strlen(key) > KEY_MAXLEN) {
+		pr_err("Length of key name \"%s\" exceeds %d.\n",
+			key, KEY_MAXLEN);
+		return NULL;
+	}
+
+	param = iscsi_find_param_from_key(key, param_list);
+	if (!param)
+		return NULL;
+
+	if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+		pr_err("Key \"%s\" may not be sent to %s,"
+			" protocol error.\n", param->name,
+			(sender & SENDER_RECEIVER) ? "target" : "initiator");
+		return NULL;
+	}
+	if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+		pr_err("Key \"%s\" may not be sent to %s,"
+				" protocol error.\n", param->name,
+			(sender & SENDER_RECEIVER) ? "initiator" : "target");
+		return NULL;
+	}
+
+	if (IS_PSTATE_ACCEPTOR(param)) {
+		pr_err("Key \"%s\" received twice, protocol error.\n",
+				key);
+		return NULL;
+	}
+
+	if (!phase)
+		return param;
+
+	if (!(param->phase & phase)) {
+		const char *phase_name;
+
+		switch (phase) {
+		case PHASE_SECURITY:
+			phase_name = "Security";
+			break;
+		case PHASE_OPERATIONAL:
+			phase_name = "Operational";
+			break;
+		default:
+			phase_name = "Unknown";
+		}
+		pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
+				param->name, phase_name);
+		return NULL;
+	}
+
+	return param;
+}
+
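+/*
+ * Cross-key sanity pass run before any keys are encoded: with
+ * DataSequenceInOrder=Yes and ErrorRecoveryLevel > 0, MaxOutstandingR2T
+ * is forced back to "1"; non-Normal (discovery) sessions are clamped to
+ * MaxConnections=1; and FirstBurstLength is capped at the negotiated
+ * MaxBurstLength.
+ */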
+static int iscsi_enforce_integrity_rules(
+	u8 phase,
+	struct iscsi_param_list *param_list)
+{
+	char *tmpptr;
+	u8 DataSequenceInOrder = 0;
+	u8 ErrorRecoveryLevel = 0, SessionType = 0;
+	u32 FirstBurstLength = 0, MaxBurstLength = 0;
+	struct iscsi_param *param = NULL;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!(param->phase & phase))
+			continue;
+		if (!strcmp(param->name, SESSIONTYPE))
+			if (!strcmp(param->value, NORMAL))
+				SessionType = 1;
+		if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+			ErrorRecoveryLevel = simple_strtoul(param->value,
+					&tmpptr, 0);
+		if (!strcmp(param->name, DATASEQUENCEINORDER))
+			if (!strcmp(param->value, YES))
+				DataSequenceInOrder = 1;
+		if (!strcmp(param->name, MAXBURSTLENGTH))
+			MaxBurstLength = simple_strtoul(param->value,
+					&tmpptr, 0);
+	}
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!(param->phase & phase))
+			continue;
+		if (!SessionType && !IS_PSTATE_ACCEPTOR(param))
+			continue;
+		if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
+		    DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
+			if (strcmp(param->value, "1")) {
+				if (iscsi_update_param_value(param, "1") < 0)
+					return -1;
+				pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+			}
+		}
+		if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) {
+			if (strcmp(param->value, "1")) {
+				if (iscsi_update_param_value(param, "1") < 0)
+					return -1;
+				pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+			}
+		}
+		if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+			FirstBurstLength = simple_strtoul(param->value,
+					&tmpptr, 0);
+			if (FirstBurstLength > MaxBurstLength) {
+				char tmpbuf[11];
+				memset(tmpbuf, 0, sizeof(tmpbuf));
+				sprintf(tmpbuf, "%u", MaxBurstLength);
+				if (iscsi_update_param_value(param, tmpbuf))
+					return -1;
+				pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+			}
+		}
+	}
+
+	return 0;
+}
+
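+/*
+ * Parse the data segment of a Login/Text PDU, which carries a run of
+ * NUL-terminated "key=value" pairs, e.g.:
+ *
+ *	HeaderDigest=CRC32C\0DataDigest=None\0
+ *
+ * Each pair advances the cursor by strlen(key) + strlen(value) + 2 to
+ * account for the '=' and the trailing NUL.  Returns 1 as soon as a
+ * security-phase authentication key (CHAP_*, SRP_*) is seen, so the
+ * caller can hand the remaining text to the authentication code.
+ */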
+int iscsi_decode_text_input(
+	u8 phase,
+	u8 sender,
+	char *textbuf,
+	u32 length,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_param_list *param_list = conn->param_list;
+	char *tmpbuf, *start = NULL, *end = NULL;
+
+	tmpbuf = kzalloc(length + 1, GFP_KERNEL);
+	if (!tmpbuf) {
+		pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
+		return -ENOMEM;
+	}
+
+	memcpy(tmpbuf, textbuf, length);
+	tmpbuf[length] = '\0';
+	start = tmpbuf;
+	end = (start + length);
+
+	while (start < end) {
+		char *key, *value;
+		struct iscsi_param *param;
+
+		if (iscsi_extract_key_value(start, &key, &value) < 0) {
+			kfree(tmpbuf);
+			return -1;
+		}
+
+		pr_debug("Got key: %s=%s\n", key, value);
+
+		if (phase & PHASE_SECURITY) {
+			if (iscsi_check_for_auth_key(key) > 0) {
+				kfree(tmpbuf);
+				return 1;
+			}
+		}
+
+		param = iscsi_check_key(key, phase, sender, param_list);
+		if (!param) {
+			if (iscsi_add_notunderstood_response(key,
+					value, param_list) < 0) {
+				kfree(tmpbuf);
+				return -1;
+			}
+			start += strlen(key) + strlen(value) + 2;
+			continue;
+		}
+		if (iscsi_check_value(param, value) < 0) {
+			kfree(tmpbuf);
+			return -1;
+		}
+
+		start += strlen(key) + strlen(value) + 2;
+
+		if (IS_PSTATE_PROPOSER(param)) {
+			if (iscsi_check_proposer_state(param, value) < 0) {
+				kfree(tmpbuf);
+				return -1;
+			}
+			SET_PSTATE_RESPONSE_GOT(param);
+		} else {
+			if (iscsi_check_acceptor_state(param, value, conn) < 0) {
+				kfree(tmpbuf);
+				return -1;
+			}
+			SET_PSTATE_ACCEPTOR(param);
+		}
+	}
+
+	kfree(tmpbuf);
+	return 0;
+}
+
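+/*
+ * Emit outgoing "key=value" pairs into textbuf in the same NUL-separated
+ * wire format that iscsi_decode_text_input() parses.  sprintf() does not
+ * count the terminating NUL it writes, so the explicit "*length += 1"
+ * after each pair keeps the separator inside the reported length.
+ */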
+int iscsi_encode_text_output(
+	u8 phase,
+	u8 sender,
+	char *textbuf,
+	u32 *length,
+	struct iscsi_param_list *param_list,
+	bool keys_workaround)
+{
+	char *output_buf = NULL;
+	struct iscsi_extra_response *er;
+	struct iscsi_param *param;
+
+	output_buf = textbuf + *length;
+
+	if (iscsi_enforce_integrity_rules(phase, param_list) < 0)
+		return -1;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!(param->sender & sender))
+			continue;
+		if (IS_PSTATE_ACCEPTOR(param) &&
+		    !IS_PSTATE_RESPONSE_SENT(param) &&
+		    !IS_PSTATE_REPLY_OPTIONAL(param) &&
+		    (param->phase & phase)) {
+			*length += sprintf(output_buf, "%s=%s",
+				param->name, param->value);
+			*length += 1;
+			output_buf = textbuf + *length;
+			SET_PSTATE_RESPONSE_SENT(param);
+			pr_debug("Sending key: %s=%s\n",
+				param->name, param->value);
+			continue;
+		}
+		if (IS_PSTATE_NEGOTIATE(param) &&
+		    !IS_PSTATE_ACCEPTOR(param) &&
+		    !IS_PSTATE_PROPOSER(param) &&
+		    (param->phase & phase)) {
+			*length += sprintf(output_buf, "%s=%s",
+				param->name, param->value);
+			*length += 1;
+			output_buf = textbuf + *length;
+			SET_PSTATE_PROPOSER(param);
+			iscsi_check_proposer_for_optional_reply(param,
+							        keys_workaround);
+			pr_debug("Sending key: %s=%s\n",
+				param->name, param->value);
+		}
+	}
+
+	list_for_each_entry(er, &param_list->extra_response_list, er_list) {
+		*length += sprintf(output_buf, "%s=%s", er->key, er->value);
+		*length += 1;
+		output_buf = textbuf + *length;
+		pr_debug("Sending key: %s=%s\n", er->key, er->value);
+	}
+	iscsi_release_extra_responses(param_list);
+
+	return 0;
+}
+
+int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list)
+{
+	int ret = 0;
+	struct iscsi_param *param;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (IS_PSTATE_NEGOTIATE(param) &&
+		    IS_PSTATE_PROPOSER(param) &&
+		    !IS_PSTATE_RESPONSE_GOT(param) &&
+		    !IS_PSTATE_REPLY_OPTIONAL(param) &&
+		    !IS_PHASE_DECLARATIVE(param)) {
+			pr_err("No response for proposed key \"%s\".\n",
+					param->name);
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
+int iscsi_change_param_value(
+	char *keyvalue,
+	struct iscsi_param_list *param_list,
+	int check_key)
+{
+	char *key = NULL, *value = NULL;
+	struct iscsi_param *param;
+	int sender = 0;
+
+	if (iscsi_extract_key_value(keyvalue, &key, &value) < 0)
+		return -1;
+
+	if (!check_key) {
+		param = __iscsi_check_key(keyvalue, sender, param_list);
+		if (!param)
+			return -1;
+	} else {
+		param = iscsi_check_key(keyvalue, 0, sender, param_list);
+		if (!param)
+			return -1;
+
+		param->set_param = 1;
+		if (iscsi_check_value(param, value) < 0) {
+			param->set_param = 0;
+			return -1;
+		}
+		param->set_param = 0;
+	}
+
+	if (iscsi_update_param_value(param, value) < 0)
+		return -1;
+
+	return 0;
+}
+
+void iscsi_set_connection_parameters(
+	struct iscsi_conn_ops *ops,
+	struct iscsi_param_list *param_list)
+{
+	char *tmpptr;
+	struct iscsi_param *param;
+
+	pr_debug("---------------------------------------------------"
+			"---------------\n");
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		/*
+		 * Special case to set MAXXMITDATASEGMENTLENGTH from the
+		 * target requested MaxRecvDataSegmentLength, even though
+		 * this key is not sent over the wire.
+		 */
+		if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
+			ops->MaxXmitDataSegmentLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("MaxXmitDataSegmentLength:     %s\n",
+				param->value);
+		}
+
+		if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+			continue;
+		if (!strcmp(param->name, AUTHMETHOD)) {
+			pr_debug("AuthMethod:                   %s\n",
+				param->value);
+		} else if (!strcmp(param->name, HEADERDIGEST)) {
+			ops->HeaderDigest = !strcmp(param->value, CRC32C);
+			pr_debug("HeaderDigest:                 %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DATADIGEST)) {
+			ops->DataDigest = !strcmp(param->value, CRC32C);
+			pr_debug("DataDigest:                   %s\n",
+				param->value);
+		} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+			/*
+			 * At this point iscsi_check_acceptor_state() will have
+			 * set ops->MaxRecvDataSegmentLength from the original
+			 * initiator provided value.
+			 */
+			pr_debug("MaxRecvDataSegmentLength:     %u\n",
+				ops->MaxRecvDataSegmentLength);
+		} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
+			ops->InitiatorRecvDataSegmentLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("InitiatorRecvDataSegmentLength: %s\n",
+				param->value);
+			ops->MaxRecvDataSegmentLength =
+					ops->InitiatorRecvDataSegmentLength;
+			pr_debug("Set MRDSL from InitiatorRecvDataSegmentLength\n");
+		} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
+			ops->TargetRecvDataSegmentLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("TargetRecvDataSegmentLength:  %s\n",
+				param->value);
+			ops->MaxXmitDataSegmentLength =
+					ops->TargetRecvDataSegmentLength;
+			pr_debug("Set MXDSL from TargetRecvDataSegmentLength\n");
+		}
+	}
+	pr_debug("----------------------------------------------------"
+			"--------------\n");
+}
+
+void iscsi_set_session_parameters(
+	struct iscsi_sess_ops *ops,
+	struct iscsi_param_list *param_list,
+	int leading)
+{
+	char *tmpptr;
+	struct iscsi_param *param;
+
+	pr_debug("----------------------------------------------------"
+			"--------------\n");
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+			continue;
+		if (!strcmp(param->name, INITIATORNAME)) {
+			if (!param->value)
+				continue;
+			if (leading)
+				snprintf(ops->InitiatorName,
+						sizeof(ops->InitiatorName),
+						"%s", param->value);
+			pr_debug("InitiatorName:                %s\n",
+				param->value);
+		} else if (!strcmp(param->name, INITIATORALIAS)) {
+			if (!param->value)
+				continue;
+			snprintf(ops->InitiatorAlias,
+						sizeof(ops->InitiatorAlias),
+						"%s", param->value);
+			pr_debug("InitiatorAlias:               %s\n",
+				param->value);
+		} else if (!strcmp(param->name, TARGETNAME)) {
+			if (!param->value)
+				continue;
+			if (leading)
+				snprintf(ops->TargetName,
+						sizeof(ops->TargetName),
+						"%s", param->value);
+			pr_debug("TargetName:                   %s\n",
+				param->value);
+		} else if (!strcmp(param->name, TARGETALIAS)) {
+			if (!param->value)
+				continue;
+			snprintf(ops->TargetAlias, sizeof(ops->TargetAlias),
+					"%s", param->value);
+			pr_debug("TargetAlias:                  %s\n",
+				param->value);
+		} else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+			ops->TargetPortalGroupTag =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("TargetPortalGroupTag:         %s\n",
+				param->value);
+		} else if (!strcmp(param->name, MAXCONNECTIONS)) {
+			ops->MaxConnections =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("MaxConnections:               %s\n",
+				param->value);
+		} else if (!strcmp(param->name, INITIALR2T)) {
+			ops->InitialR2T = !strcmp(param->value, YES);
+			 pr_debug("InitialR2T:                   %s\n",
+				param->value);
+		} else if (!strcmp(param->name, IMMEDIATEDATA)) {
+			ops->ImmediateData = !strcmp(param->value, YES);
+			pr_debug("ImmediateData:                %s\n",
+				param->value);
+		} else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+			ops->MaxBurstLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("MaxBurstLength:               %s\n",
+				param->value);
+		} else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+			ops->FirstBurstLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("FirstBurstLength:             %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+			ops->DefaultTime2Wait =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("DefaultTime2Wait:             %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+			ops->DefaultTime2Retain =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("DefaultTime2Retain:           %s\n",
+				param->value);
+		} else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+			ops->MaxOutstandingR2T =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("MaxOutstandingR2T:            %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DATAPDUINORDER)) {
+			ops->DataPDUInOrder = !strcmp(param->value, YES);
+			pr_debug("DataPDUInOrder:               %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+			ops->DataSequenceInOrder = !strcmp(param->value, YES);
+			pr_debug("DataSequenceInOrder:          %s\n",
+				param->value);
+		} else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+			ops->ErrorRecoveryLevel =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("ErrorRecoveryLevel:           %s\n",
+				param->value);
+		} else if (!strcmp(param->name, SESSIONTYPE)) {
+			ops->SessionType = !strcmp(param->value, DISCOVERY);
+			pr_debug("SessionType:                  %s\n",
+				param->value);
+		} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
+			ops->RDMAExtensions = !strcmp(param->value, YES);
+			pr_debug("RDMAExtensions:               %s\n",
+				param->value);
+		}
+	}
+	pr_debug("----------------------------------------------------"
+			"--------------\n");
+}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
new file mode 100644
index 0000000..17a58c2
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -0,0 +1,289 @@
+#ifndef ISCSI_PARAMETERS_H
+#define ISCSI_PARAMETERS_H
+
+#include <scsi/iscsi_proto.h>
+
+struct iscsi_extra_response {
+	char key[KEY_MAXLEN];
+	char value[32];
+	struct list_head er_list;
+} ____cacheline_aligned;
+
+struct iscsi_param {
+	char *name;
+	char *value;
+	u8 set_param;
+	u8 phase;
+	u8 scope;
+	u8 sender;
+	u8 type;
+	u8 use;
+	u16 type_range;
+	u32 state;
+	struct list_head p_list;
+} ____cacheline_aligned;
+
+extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
+extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
+extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
+extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
+extern void iscsi_print_params(struct iscsi_param_list *);
+extern int iscsi_create_default_params(struct iscsi_param_list **);
+extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
+extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
+extern int iscsi_copy_param_list(struct iscsi_param_list **,
+			struct iscsi_param_list *, int);
+extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
+extern void iscsi_release_param_list(struct iscsi_param_list *);
+extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
+extern int iscsi_extract_key_value(char *, char **, char **);
+extern int iscsi_update_param_value(struct iscsi_param *, char *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
+extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
+			struct iscsi_param_list *, bool);
+extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
+extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
+			struct iscsi_param_list *);
+extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
+			struct iscsi_param_list *, int);
+
+#define YES				"Yes"
+#define NO				"No"
+#define ALL				"All"
+#define IRRELEVANT			"Irrelevant"
+#define NONE				"None"
+#define NOTUNDERSTOOD			"NotUnderstood"
+#define REJECT				"Reject"
+
+/*
+ * The Parameter Names.
+ */
+#define AUTHMETHOD			"AuthMethod"
+#define HEADERDIGEST			"HeaderDigest"
+#define DATADIGEST			"DataDigest"
+#define MAXCONNECTIONS			"MaxConnections"
+#define SENDTARGETS			"SendTargets"
+#define TARGETNAME			"TargetName"
+#define INITIATORNAME			"InitiatorName"
+#define TARGETALIAS			"TargetAlias"
+#define INITIATORALIAS			"InitiatorAlias"
+#define TARGETADDRESS			"TargetAddress"
+#define TARGETPORTALGROUPTAG		"TargetPortalGroupTag"
+#define INITIALR2T			"InitialR2T"
+#define IMMEDIATEDATA			"ImmediateData"
+#define MAXRECVDATASEGMENTLENGTH	"MaxRecvDataSegmentLength"
+#define MAXXMITDATASEGMENTLENGTH	"MaxXmitDataSegmentLength"
+#define MAXBURSTLENGTH			"MaxBurstLength"
+#define FIRSTBURSTLENGTH		"FirstBurstLength"
+#define DEFAULTTIME2WAIT		"DefaultTime2Wait"
+#define DEFAULTTIME2RETAIN		"DefaultTime2Retain"
+#define MAXOUTSTANDINGR2T		"MaxOutstandingR2T"
+#define DATAPDUINORDER			"DataPDUInOrder"
+#define DATASEQUENCEINORDER		"DataSequenceInOrder"
+#define ERRORRECOVERYLEVEL		"ErrorRecoveryLevel"
+#define SESSIONTYPE			"SessionType"
+#define IFMARKER			"IFMarker"
+#define OFMARKER			"OFMarker"
+#define IFMARKINT			"IFMarkInt"
+#define OFMARKINT			"OFMarkInt"
+#define X_EXTENSIONKEY			"X-com.sbei.version"
+#define X_EXTENSIONKEY_CISCO_NEW	"X-com.cisco.protocol"
+#define X_EXTENSIONKEY_CISCO_OLD	"X-com.cisco.iscsi.draft"
+
+/*
+ * Parameter names of iSCSI Extensions for RDMA (iSER).  See RFC-5046.
+ */
+#define RDMAEXTENSIONS			"RDMAExtensions"
+#define INITIATORRECVDATASEGMENTLENGTH	"InitiatorRecvDataSegmentLength"
+#define TARGETRECVDATASEGMENTLENGTH	"TargetRecvDataSegmentLength"
+
+/*
+ * For AuthMethod.
+ */
+#define KRB5				"KRB5"
+#define SPKM1				"SPKM1"
+#define SPKM2				"SPKM2"
+#define SRP				"SRP"
+#define CHAP				"CHAP"
+
+/*
+ * Initial values for Parameter Negotiation.
+ */
+#define INITIAL_AUTHMETHOD			CHAP
+#define INITIAL_HEADERDIGEST			"CRC32C,None"
+#define INITIAL_DATADIGEST			"CRC32C,None"
+#define INITIAL_MAXCONNECTIONS			"1"
+#define INITIAL_SENDTARGETS			ALL
+#define INITIAL_TARGETNAME			"LIO.Target"
+#define INITIAL_INITIATORNAME			"LIO.Initiator"
+#define INITIAL_TARGETALIAS			"LIO Target"
+#define INITIAL_INITIATORALIAS			"LIO Initiator"
+#define INITIAL_TARGETADDRESS			"0.0.0.0:0000,0"
+#define INITIAL_TARGETPORTALGROUPTAG		"1"
+#define INITIAL_INITIALR2T			YES
+#define INITIAL_IMMEDIATEDATA			YES
+#define INITIAL_MAXRECVDATASEGMENTLENGTH	"8192"
+/*
+ * Match outgoing MXDSL default to incoming Open-iSCSI default
+ */
+#define INITIAL_MAXXMITDATASEGMENTLENGTH	"262144"
+#define INITIAL_MAXBURSTLENGTH			"262144"
+#define INITIAL_FIRSTBURSTLENGTH		"65536"
+#define INITIAL_DEFAULTTIME2WAIT		"2"
+#define INITIAL_DEFAULTTIME2RETAIN		"20"
+#define INITIAL_MAXOUTSTANDINGR2T		"1"
+#define INITIAL_DATAPDUINORDER			YES
+#define INITIAL_DATASEQUENCEINORDER		YES
+#define INITIAL_ERRORRECOVERYLEVEL		"0"
+#define INITIAL_SESSIONTYPE			NORMAL
+#define INITIAL_IFMARKER			NO
+#define INITIAL_OFMARKER			NO
+#define INITIAL_IFMARKINT			REJECT
+#define INITIAL_OFMARKINT			REJECT
+
+/*
+ * Initial values for iSER parameters following RFC-5046 Section 6
+ */
+#define INITIAL_RDMAEXTENSIONS			NO
+#define INITIAL_INITIATORRECVDATASEGMENTLENGTH	"262144"
+#define INITIAL_TARGETRECVDATASEGMENTLENGTH	"8192"
+
+/*
+ * For [Header,Data]Digests.
+ */
+#define CRC32C				"CRC32C"
+
+/*
+ * For SessionType.
+ */
+#define DISCOVERY			"Discovery"
+#define NORMAL				"Normal"
+
+/*
+ * struct iscsi_param->use
+ */
+#define USE_LEADING_ONLY		0x01
+#define USE_INITIAL_ONLY		0x02
+#define USE_ALL				0x04
+
+#define IS_USE_LEADING_ONLY(p)		((p)->use & USE_LEADING_ONLY)
+#define IS_USE_INITIAL_ONLY(p)		((p)->use & USE_INITIAL_ONLY)
+#define IS_USE_ALL(p)			((p)->use & USE_ALL)
+
+#define SET_USE_INITIAL_ONLY(p)		((p)->use |= USE_INITIAL_ONLY)
+
+/*
+ * struct iscsi_param->sender
+ */
+#define	SENDER_INITIATOR		0x01
+#define SENDER_TARGET			0x02
+#define SENDER_BOTH			0x03
+/* Used in iscsi_check_key() */
+#define SENDER_RECEIVER			0x04
+
+#define IS_SENDER_INITIATOR(p)		((p)->sender & SENDER_INITIATOR)
+#define IS_SENDER_TARGET(p)		((p)->sender & SENDER_TARGET)
+#define IS_SENDER_BOTH(p)		((p)->sender & SENDER_BOTH)
+
+/*
+ * struct iscsi_param->scope
+ */
+#define SCOPE_CONNECTION_ONLY		0x01
+#define SCOPE_SESSION_WIDE		0x02
+
+#define IS_SCOPE_CONNECTION_ONLY(p)	((p)->scope & SCOPE_CONNECTION_ONLY)
+#define IS_SCOPE_SESSION_WIDE(p)	((p)->scope & SCOPE_SESSION_WIDE)
+
+/*
+ * struct iscsi_param->phase
+ */
+#define PHASE_SECURITY			0x01
+#define PHASE_OPERATIONAL		0x02
+#define PHASE_DECLARATIVE		0x04
+#define PHASE_FFP0			0x08
+
+#define IS_PHASE_SECURITY(p)		((p)->phase & PHASE_SECURITY)
+#define IS_PHASE_OPERATIONAL(p)		((p)->phase & PHASE_OPERATIONAL)
+#define IS_PHASE_DECLARATIVE(p)		((p)->phase & PHASE_DECLARATIVE)
+#define IS_PHASE_FFP0(p)		((p)->phase & PHASE_FFP0)
+
+/*
+ * struct iscsi_param->type
+ */
+#define TYPE_BOOL_AND			0x01
+#define TYPE_BOOL_OR			0x02
+#define TYPE_NUMBER			0x04
+#define TYPE_NUMBER_RANGE		0x08
+#define TYPE_STRING			0x10
+#define TYPE_VALUE_LIST			0x20
+
+#define IS_TYPE_BOOL_AND(p)		((p)->type & TYPE_BOOL_AND)
+#define IS_TYPE_BOOL_OR(p)		((p)->type & TYPE_BOOL_OR)
+#define IS_TYPE_NUMBER(p)		((p)->type & TYPE_NUMBER)
+#define IS_TYPE_NUMBER_RANGE(p)		((p)->type & TYPE_NUMBER_RANGE)
+#define IS_TYPE_STRING(p)		((p)->type & TYPE_STRING)
+#define IS_TYPE_VALUE_LIST(p)		((p)->type & TYPE_VALUE_LIST)
+
+/*
+ * struct iscsi_param->type_range
+ */
+#define TYPERANGE_BOOL_AND		0x0001
+#define TYPERANGE_BOOL_OR		0x0002
+#define TYPERANGE_0_TO_2		0x0004
+#define TYPERANGE_0_TO_3600		0x0008
+#define TYPERANGE_0_TO_32767		0x0010
+#define TYPERANGE_0_TO_65535		0x0020
+#define TYPERANGE_1_TO_65535		0x0040
+#define TYPERANGE_2_TO_3600		0x0080
+#define TYPERANGE_512_TO_16777215	0x0100
+#define TYPERANGE_AUTH			0x0200
+#define TYPERANGE_DIGEST		0x0400
+#define TYPERANGE_ISCSINAME		0x0800
+#define TYPERANGE_SESSIONTYPE		0x1000
+#define TYPERANGE_TARGETADDRESS		0x2000
+#define TYPERANGE_UTF8			0x4000
+
+#define IS_TYPERANGE_0_TO_2(p)		((p)->type_range & TYPERANGE_0_TO_2)
+#define IS_TYPERANGE_0_TO_3600(p)	((p)->type_range & TYPERANGE_0_TO_3600)
+#define IS_TYPERANGE_0_TO_32767(p)	((p)->type_range & TYPERANGE_0_TO_32767)
+#define IS_TYPERANGE_0_TO_65535(p)	((p)->type_range & TYPERANGE_0_TO_65535)
+#define IS_TYPERANGE_1_TO_65535(p)	((p)->type_range & TYPERANGE_1_TO_65535)
+#define IS_TYPERANGE_2_TO_3600(p)	((p)->type_range & TYPERANGE_2_TO_3600)
+#define IS_TYPERANGE_512_TO_16777215(p)	((p)->type_range & \
+						TYPERANGE_512_TO_16777215)
+#define IS_TYPERANGE_AUTH_PARAM(p)	((p)->type_range & TYPERANGE_AUTH)
+#define IS_TYPERANGE_DIGEST_PARAM(p)	((p)->type_range & TYPERANGE_DIGEST)
+#define IS_TYPERANGE_SESSIONTYPE(p)	((p)->type_range & \
+						TYPERANGE_SESSIONTYPE)
+
+/*
+ * struct iscsi_param->state
+ */
+#define PSTATE_ACCEPTOR			0x01
+#define PSTATE_NEGOTIATE		0x02
+#define PSTATE_PROPOSER			0x04
+#define PSTATE_IRRELEVANT		0x08
+#define PSTATE_REJECT			0x10
+#define PSTATE_REPLY_OPTIONAL		0x20
+#define PSTATE_RESPONSE_GOT		0x40
+#define PSTATE_RESPONSE_SENT		0x80
+
+#define IS_PSTATE_ACCEPTOR(p)		((p)->state & PSTATE_ACCEPTOR)
+#define IS_PSTATE_NEGOTIATE(p)		((p)->state & PSTATE_NEGOTIATE)
+#define IS_PSTATE_PROPOSER(p)		((p)->state & PSTATE_PROPOSER)
+#define IS_PSTATE_IRRELEVANT(p)		((p)->state & PSTATE_IRRELEVANT)
+#define IS_PSTATE_REJECT(p)		((p)->state & PSTATE_REJECT)
+#define IS_PSTATE_REPLY_OPTIONAL(p)	((p)->state & PSTATE_REPLY_OPTIONAL)
+#define IS_PSTATE_RESPONSE_GOT(p)	((p)->state & PSTATE_RESPONSE_GOT)
+#define IS_PSTATE_RESPONSE_SENT(p)	((p)->state & PSTATE_RESPONSE_SENT)
+
+#define SET_PSTATE_ACCEPTOR(p)		((p)->state |= PSTATE_ACCEPTOR)
+#define SET_PSTATE_NEGOTIATE(p)		((p)->state |= PSTATE_NEGOTIATE)
+#define SET_PSTATE_PROPOSER(p)		((p)->state |= PSTATE_PROPOSER)
+#define SET_PSTATE_IRRELEVANT(p)	((p)->state |= PSTATE_IRRELEVANT)
+#define SET_PSTATE_REJECT(p)		((p)->state |= PSTATE_REJECT)
+#define SET_PSTATE_REPLY_OPTIONAL(p)	((p)->state |= PSTATE_REPLY_OPTIONAL)
+#define SET_PSTATE_RESPONSE_GOT(p)	((p)->state |= PSTATE_RESPONSE_GOT)
+#define SET_PSTATE_RESPONSE_SENT(p)	((p)->state |= PSTATE_RESPONSE_SENT)
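+
+/*
+ * Illustrative usage sketch (not part of the original source): a
+ * negotiation loop would typically combine these accessors roughly as
+ * follows, e.g. to mark a key the target proposed and has now seen
+ * answered:
+ *
+ *	if (IS_PSTATE_PROPOSER(param) && !IS_PSTATE_RESPONSE_GOT(param))
+ *		SET_PSTATE_RESPONSE_GOT(param);
+ */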
+
+#endif /* ISCSI_PARAMETERS_H */
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
new file mode 100644
index 0000000..e446a09
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -0,0 +1,700 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI DataSequenceInOrder=No
+ * and DataPDUInOrder=No.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/random.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_util.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_seq_pdu_list.h"
+
+#define OFFLOAD_BUF_SIZE	32768
+
+#ifdef DEBUG
+static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+{
+	int i;
+	struct iscsi_seq *seq;
+
+	pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
+			cmd->init_task_tag);
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		seq = &cmd->seq_list[i];
+		pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
+			" offset: %d, xfer_len: %d, seq_send_order: %d,"
+			" seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
+			seq->offset, seq->xfer_len, seq->seq_send_order,
+			seq->seq_no);
+	}
+}
+
+static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+{
+	int i;
+	struct iscsi_pdu *pdu;
+
+	pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
+			cmd->init_task_tag);
+
+	for (i = 0; i < cmd->pdu_count; i++) {
+		pdu = &cmd->pdu_list[i];
+		pr_debug("i: %d, offset: %d, length: %d,"
+			" pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
+			pdu->length, pdu->pdu_send_order, pdu->seq_no);
+	}
+}
+#else
+static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {}
+static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {}
+#endif
+
+static void iscsit_ordered_seq_lists(
+	struct iscsi_cmd *cmd,
+	u8 type)
+{
+	u32 i, seq_count = 0;
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+			continue;
+		cmd->seq_list[i].seq_send_order = seq_count++;
+	}
+}
+
+static void iscsit_ordered_pdu_lists(
+	struct iscsi_cmd *cmd,
+	u8 type)
+{
+	u32 i, pdu_send_order = 0, seq_no = 0;
+
+	for (i = 0; i < cmd->pdu_count; i++) {
+redo:
+		if (cmd->pdu_list[i].seq_no == seq_no) {
+			cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
+			continue;
+		}
+		seq_no++;
+		pdu_send_order = 0;
+		goto redo;
+	}
+}
+
+/*
+ *	Generate "count" random values into array[].
+ *	Use 0x80000000 to mark values that have already been generated.
+ */
+static void iscsit_create_random_array(u32 *array, u32 count)
+{
+	int i, j, k;
+
+	if (count == 1) {
+		array[0] = 0;
+		return;
+	}
+
+	for (i = 0; i < count; i++) {
+redo:
+		get_random_bytes(&j, sizeof(u32));
+		j = (1 + (int) (9999 + 1) - j) % count;
+		for (k = 0; k < i + 1; k++) {
+			j |= 0x80000000;
+			if ((array[k] & 0x80000000) && (array[k] == j))
+				goto redo;
+		}
+		array[i] = j;
+	}
+
+	for (i = 0; i < count; i++)
+		array[i] &= ~0x80000000;
+}
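+
+/*
+ * Worked example (illustrative): for count = 3, each random draw is
+ * effectively folded into the range [0, count) and stored with bit
+ * 0x80000000 set; a draw that folds onto an already-marked value is
+ * retried.  The final pass clears the marker bits, leaving a permutation
+ * of 0 .. count - 1 such as { 1, 0, 2 }.
+ */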
+
+static int iscsit_randomize_pdu_lists(
+	struct iscsi_cmd *cmd,
+	u8 type)
+{
+	int i = 0;
+	u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
+
+	for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
+redo:
+		if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
+			seq_count++;
+			continue;
+		}
+		array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
+		if (!array) {
+			pr_err("Unable to allocate memory"
+				" for random array.\n");
+			return -ENOMEM;
+		}
+		iscsit_create_random_array(array, seq_count);
+
+		for (i = 0; i < seq_count; i++)
+			cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+		kfree(array);
+
+		seq_offset += seq_count;
+		seq_count = 0;
+		seq_no++;
+		goto redo;
+	}
+
+	if (seq_count) {
+		array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
+		if (!array) {
+			pr_err("Unable to allocate memory for"
+				" random array.\n");
+			return -ENOMEM;
+		}
+		iscsit_create_random_array(array, seq_count);
+
+		for (i = 0; i < seq_count; i++)
+			cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+		kfree(array);
+	}
+
+	return 0;
+}
+
+static int iscsit_randomize_seq_lists(
+	struct iscsi_cmd *cmd,
+	u8 type)
+{
+	int i, j = 0;
+	u32 *array, seq_count = cmd->seq_count;
+
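+	/*
+	 * Immediate and unsolicited data sequences are always sent first
+	 * and in order, so only the remaining SEQTYPE_NORMAL sequences
+	 * take part in the randomization below.
+	 */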
+	if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
+		seq_count--;
+	else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
+		seq_count -= 2;
+
+	if (!seq_count)
+		return 0;
+
+	array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
+	if (!array) {
+		pr_err("Unable to allocate memory for random array.\n");
+		return -ENOMEM;
+	}
+	iscsit_create_random_array(array, seq_count);
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+			continue;
+		cmd->seq_list[i].seq_send_order = array[j++];
+	}
+
+	kfree(array);
+	return 0;
+}
+
+static void iscsit_determine_counts_for_list(
+	struct iscsi_cmd *cmd,
+	struct iscsi_build_list *bl,
+	u32 *seq_count,
+	u32 *pdu_count)
+{
+	int check_immediate = 0;
+	u32 burstlength = 0, offset = 0;
+	u32 unsolicited_data_length = 0;
+	u32 mdsl;
+	struct iscsi_conn *conn = cmd->conn;
+
+	if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
+		mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
+	else
+		mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
+
+	if ((bl->type == PDULIST_IMMEDIATE) ||
+	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+		check_immediate = 1;
+
+	if ((bl->type == PDULIST_UNSOLICITED) ||
+	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+		unsolicited_data_length = min(cmd->se_cmd.data_length,
+			conn->sess->sess_ops->FirstBurstLength);
+
+	while (offset < cmd->se_cmd.data_length) {
+		*pdu_count += 1;
+
+		if (check_immediate) {
+			check_immediate = 0;
+			offset += bl->immediate_data_length;
+			*seq_count += 1;
+			if (unsolicited_data_length)
+				unsolicited_data_length -=
+					bl->immediate_data_length;
+			continue;
+		}
+		if (unsolicited_data_length > 0) {
+			if ((offset + mdsl) >= cmd->se_cmd.data_length) {
+				unsolicited_data_length -=
+					(cmd->se_cmd.data_length - offset);
+				offset += (cmd->se_cmd.data_length - offset);
+				continue;
+			}
+			if ((offset + mdsl)
+					>= conn->sess->sess_ops->FirstBurstLength) {
+				unsolicited_data_length -=
+					(conn->sess->sess_ops->FirstBurstLength -
+					offset);
+				offset += (conn->sess->sess_ops->FirstBurstLength -
+					offset);
+				burstlength = 0;
+				*seq_count += 1;
+				continue;
+			}
+
+			offset += mdsl;
+			unsolicited_data_length -= mdsl;
+			continue;
+		}
+		if ((offset + mdsl) >= cmd->se_cmd.data_length) {
+			offset += (cmd->se_cmd.data_length - offset);
+			continue;
+		}
+		if ((burstlength + mdsl) >=
+		     conn->sess->sess_ops->MaxBurstLength) {
+			offset += (conn->sess->sess_ops->MaxBurstLength -
+					burstlength);
+			burstlength = 0;
+			*seq_count += 1;
+			continue;
+		}
+
+		burstlength += mdsl;
+		offset += mdsl;
+	}
+}
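+
+/*
+ * Worked example (illustrative, with made-up session values): for a
+ * 196608-byte WRITE with mdsl = 8192, MaxBurstLength = 65536 and no
+ * immediate or unsolicited data, the loop above counts 24 PDUs and adds
+ * 2 sequence boundaries; with the caller's initial seq_count of 1 this
+ * yields 3 sequences of 8 PDUs each.
+ */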
+
+/*
+ *	Builds PDU and/or Sequence list, called when DataSequenceInOrder=No
+ *	and/or DataPDUInOrder=No has been negotiated.
+ */
+static int iscsit_do_build_pdu_and_seq_lists(
+	struct iscsi_cmd *cmd,
+	struct iscsi_build_list *bl)
+{
+	int check_immediate = 0, datapduinorder, datasequenceinorder;
+	u32 burstlength = 0, offset = 0, i = 0, mdsl;
+	u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu = cmd->pdu_list;
+	struct iscsi_seq *seq = cmd->seq_list;
+
+	if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
+		mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
+	else
+		mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
+
+	datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
+	datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
+
+	if ((bl->type == PDULIST_IMMEDIATE) ||
+	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+		check_immediate = 1;
+
+	if ((bl->type == PDULIST_UNSOLICITED) ||
+	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+		unsolicited_data_length = min(cmd->se_cmd.data_length,
+			conn->sess->sess_ops->FirstBurstLength);
+
+	while (offset < cmd->se_cmd.data_length) {
+		pdu_count++;
+		if (!datapduinorder) {
+			pdu[i].offset = offset;
+			pdu[i].seq_no = seq_no;
+		}
+		if (!datasequenceinorder && (pdu_count == 1)) {
+			seq[seq_no].pdu_start = i;
+			seq[seq_no].seq_no = seq_no;
+			seq[seq_no].offset = offset;
+			seq[seq_no].orig_offset = offset;
+		}
+
+		if (check_immediate) {
+			check_immediate = 0;
+			if (!datapduinorder) {
+				pdu[i].type = PDUTYPE_IMMEDIATE;
+				pdu[i++].length = bl->immediate_data_length;
+			}
+			if (!datasequenceinorder) {
+				seq[seq_no].type = SEQTYPE_IMMEDIATE;
+				seq[seq_no].pdu_count = 1;
+				seq[seq_no].xfer_len =
+					bl->immediate_data_length;
+			}
+			offset += bl->immediate_data_length;
+			pdu_count = 0;
+			seq_no++;
+			if (unsolicited_data_length)
+				unsolicited_data_length -=
+					bl->immediate_data_length;
+			continue;
+		}
+		if (unsolicited_data_length > 0) {
+			if ((offset + mdsl) >= cmd->se_cmd.data_length) {
+				if (!datapduinorder) {
+					pdu[i].type = PDUTYPE_UNSOLICITED;
+					pdu[i].length =
+						(cmd->se_cmd.data_length - offset);
+				}
+				if (!datasequenceinorder) {
+					seq[seq_no].type = SEQTYPE_UNSOLICITED;
+					seq[seq_no].pdu_count = pdu_count;
+					seq[seq_no].xfer_len = (burstlength +
+						(cmd->se_cmd.data_length - offset));
+				}
+				unsolicited_data_length -=
+						(cmd->se_cmd.data_length - offset);
+				offset += (cmd->se_cmd.data_length - offset);
+				continue;
+			}
+			if ((offset + mdsl) >=
+					conn->sess->sess_ops->FirstBurstLength) {
+				if (!datapduinorder) {
+					pdu[i].type = PDUTYPE_UNSOLICITED;
+					pdu[i++].length =
+					   (conn->sess->sess_ops->FirstBurstLength -
+						offset);
+				}
+				if (!datasequenceinorder) {
+					seq[seq_no].type = SEQTYPE_UNSOLICITED;
+					seq[seq_no].pdu_count = pdu_count;
+					seq[seq_no].xfer_len = (burstlength +
+					   (conn->sess->sess_ops->FirstBurstLength -
+						offset));
+				}
+				unsolicited_data_length -=
+					(conn->sess->sess_ops->FirstBurstLength -
+						offset);
+				offset += (conn->sess->sess_ops->FirstBurstLength -
+						offset);
+				burstlength = 0;
+				pdu_count = 0;
+				seq_no++;
+				continue;
+			}
+
+			if (!datapduinorder) {
+				pdu[i].type = PDUTYPE_UNSOLICITED;
+				pdu[i++].length = mdsl;
+			}
+			burstlength += mdsl;
+			offset += mdsl;
+			unsolicited_data_length -= mdsl;
+			continue;
+		}
+		if ((offset + mdsl) >= cmd->se_cmd.data_length) {
+			if (!datapduinorder) {
+				pdu[i].type = PDUTYPE_NORMAL;
+				pdu[i].length = (cmd->se_cmd.data_length - offset);
+			}
+			if (!datasequenceinorder) {
+				seq[seq_no].type = SEQTYPE_NORMAL;
+				seq[seq_no].pdu_count = pdu_count;
+				seq[seq_no].xfer_len = (burstlength +
+					(cmd->se_cmd.data_length - offset));
+			}
+			offset += (cmd->se_cmd.data_length - offset);
+			continue;
+		}
+		if ((burstlength + mdsl) >=
+		     conn->sess->sess_ops->MaxBurstLength) {
+			if (!datapduinorder) {
+				pdu[i].type = PDUTYPE_NORMAL;
+				pdu[i++].length =
+					(conn->sess->sess_ops->MaxBurstLength -
+						burstlength);
+			}
+			if (!datasequenceinorder) {
+				seq[seq_no].type = SEQTYPE_NORMAL;
+				seq[seq_no].pdu_count = pdu_count;
+				seq[seq_no].xfer_len = (burstlength +
+					(conn->sess->sess_ops->MaxBurstLength -
+					burstlength));
+			}
+			offset += (conn->sess->sess_ops->MaxBurstLength -
+					burstlength);
+			burstlength = 0;
+			pdu_count = 0;
+			seq_no++;
+			continue;
+		}
+
+		if (!datapduinorder) {
+			pdu[i].type = PDUTYPE_NORMAL;
+			pdu[i++].length = mdsl;
+		}
+		burstlength += mdsl;
+		offset += mdsl;
+	}
+
+	if (!datasequenceinorder) {
+		if (bl->data_direction & ISCSI_PDU_WRITE) {
+			if (bl->randomize & RANDOM_R2T_OFFSETS) {
+				if (iscsit_randomize_seq_lists(cmd, bl->type)
+						< 0)
+					return -1;
+			} else
+				iscsit_ordered_seq_lists(cmd, bl->type);
+		} else if (bl->data_direction & ISCSI_PDU_READ) {
+			if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
+				if (iscsit_randomize_seq_lists(cmd, bl->type)
+						< 0)
+					return -1;
+			} else
+				iscsit_ordered_seq_lists(cmd, bl->type);
+		}
+
+		iscsit_dump_seq_list(cmd);
+	}
+	if (!datapduinorder) {
+		if (bl->data_direction & ISCSI_PDU_WRITE) {
+			if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
+				if (iscsit_randomize_pdu_lists(cmd, bl->type)
+						< 0)
+					return -1;
+			} else
+				iscsit_ordered_pdu_lists(cmd, bl->type);
+		} else if (bl->data_direction & ISCSI_PDU_READ) {
+			if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
+				if (iscsit_randomize_pdu_lists(cmd, bl->type)
+						< 0)
+					return -1;
+			} else
+				iscsit_ordered_pdu_lists(cmd, bl->type);
+		}
+
+		iscsit_dump_pdu_list(cmd);
+	}
+
+	return 0;
+}
+
+int iscsit_build_pdu_and_seq_lists(
+	struct iscsi_cmd *cmd,
+	u32 immediate_data_length)
+{
+	struct iscsi_build_list bl;
+	u32 pdu_count = 0, seq_count = 1;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu = NULL;
+	struct iscsi_seq *seq = NULL;
+
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na;
+
+	/*
+	 * Do nothing if no OOO shenanigans
+	 */
+	if (sess->sess_ops->DataSequenceInOrder &&
+	    sess->sess_ops->DataPDUInOrder)
+		return 0;
+
+	if (cmd->data_direction == DMA_NONE)
+		return 0;
+
+	na = iscsit_tpg_get_node_attrib(sess);
+	memset(&bl, 0, sizeof(struct iscsi_build_list));
+
+	if (cmd->data_direction == DMA_FROM_DEVICE) {
+		bl.data_direction = ISCSI_PDU_READ;
+		bl.type = PDULIST_NORMAL;
+		if (na->random_datain_pdu_offsets)
+			bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
+		if (na->random_datain_seq_offsets)
+			bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
+	} else {
+		bl.data_direction = ISCSI_PDU_WRITE;
+		bl.immediate_data_length = immediate_data_length;
+		if (na->random_r2t_offsets)
+			bl.randomize |= RANDOM_R2T_OFFSETS;
+
+		if (!cmd->immediate_data && !cmd->unsolicited_data)
+			bl.type = PDULIST_NORMAL;
+		else if (cmd->immediate_data && !cmd->unsolicited_data)
+			bl.type = PDULIST_IMMEDIATE;
+		else if (!cmd->immediate_data && cmd->unsolicited_data)
+			bl.type = PDULIST_UNSOLICITED;
+		else if (cmd->immediate_data && cmd->unsolicited_data)
+			bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
+	}
+
+	iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count);
+
+	if (!conn->sess->sess_ops->DataSequenceInOrder) {
+		seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC);
+		if (!seq) {
+			pr_err("Unable to allocate struct iscsi_seq list\n");
+			return -ENOMEM;
+		}
+		cmd->seq_list = seq;
+		cmd->seq_count = seq_count;
+	}
+
+	if (!conn->sess->sess_ops->DataPDUInOrder) {
+		pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC);
+		if (!pdu) {
+			pr_err("Unable to allocate struct iscsi_pdu list.\n");
+			kfree(seq);
+			return -ENOMEM;
+		}
+		cmd->pdu_list = pdu;
+		cmd->pdu_count = pdu_count;
+	}
+
+	return iscsit_do_build_pdu_and_seq_lists(cmd, &bl);
+}
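+
+/*
+ * Note: this entry point only does real work when DataSequenceInOrder=No
+ * and/or DataPDUInOrder=No was negotiated and the command actually moves
+ * data; for fully in-order sessions (and DMA_NONE commands) it returns 0
+ * without allocating anything.
+ */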
+
+struct iscsi_pdu *iscsit_get_pdu_holder(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 length)
+{
+	u32 i;
+	struct iscsi_pdu *pdu = NULL;
+
+	if (!cmd->pdu_list) {
+		pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+		return NULL;
+	}
+
+	pdu = &cmd->pdu_list[0];
+
+	for (i = 0; i < cmd->pdu_count; i++)
+		if ((pdu[i].offset == offset) && (pdu[i].length == length))
+			return &pdu[i];
+
+	pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
+		" %u, Length: %u\n", cmd->init_task_tag, offset, length);
+	return NULL;
+}
+
+struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
+	struct iscsi_cmd *cmd,
+	struct iscsi_seq *seq)
+{
+	u32 i;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu = NULL;
+
+	if (!cmd->pdu_list) {
+		pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+		return NULL;
+	}
+
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+redo:
+		pdu = &cmd->pdu_list[cmd->pdu_start];
+
+		for (i = 0; pdu[i].seq_no == cmd->seq_no; i++) {
+			pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
+				"_send_order: %d, pdu[i].offset: %d,"
+				" pdu[i].length: %d\n", pdu[i].seq_no,
+				pdu[i].pdu_send_order, pdu[i].offset,
+				pdu[i].length);
+
+			if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
+				cmd->pdu_send_order++;
+				return &pdu[i];
+			}
+		}
+
+		cmd->pdu_start += cmd->pdu_send_order;
+		cmd->pdu_send_order = 0;
+		cmd->seq_no++;
+
+		if (cmd->pdu_start < cmd->pdu_count)
+			goto redo;
+
+		pr_err("Command ITT: 0x%08x unable to locate"
+			" struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
+			cmd->init_task_tag, cmd->pdu_send_order);
+		return NULL;
+	} else {
+		if (!seq) {
+			pr_err("struct iscsi_seq is NULL!\n");
+			return NULL;
+		}
+
+		pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
+			" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
+			seq->seq_no);
+
+		pdu = &cmd->pdu_list[seq->pdu_start];
+
+		if (seq->pdu_send_order == seq->pdu_count) {
+			pr_err("Command ITT: 0x%08x seq->pdu_send"
+				"_order: %u equals seq->pdu_count: %u\n",
+				cmd->init_task_tag, seq->pdu_send_order,
+				seq->pdu_count);
+			return NULL;
+		}
+
+		for (i = 0; i < seq->pdu_count; i++) {
+			if (pdu[i].pdu_send_order == seq->pdu_send_order) {
+				seq->pdu_send_order++;
+				return &pdu[i];
+			}
+		}
+
+		pr_err("Command ITT: 0x%08x unable to locate struct"
+			" iscsi_pdu for seq->pdu_send_order: %u.\n",
+			cmd->init_task_tag, seq->pdu_send_order);
+		return NULL;
+	}
+
+	return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 length)
+{
+	u32 i;
+
+	if (!cmd->seq_list) {
+		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+		return NULL;
+	}
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
+			"xfer_len: %d, seq_list[i].seq_no %u\n",
+			cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
+			cmd->seq_list[i].seq_no);
+
+		if ((cmd->seq_list[i].orig_offset +
+				cmd->seq_list[i].xfer_len) >=
+				(offset + length))
+			return &cmd->seq_list[i];
+	}
+
+	pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
+		" Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
+		length);
+	return NULL;
+}
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
new file mode 100644
index 0000000..d5b1537
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -0,0 +1,86 @@
+#ifndef ISCSI_SEQ_AND_PDU_LIST_H
+#define ISCSI_SEQ_AND_PDU_LIST_H
+
+/* struct iscsi_pdu->status */
+#define DATAOUT_PDU_SENT			1
+
+/* struct iscsi_seq->type */
+#define SEQTYPE_IMMEDIATE			1
+#define SEQTYPE_UNSOLICITED			2
+#define SEQTYPE_NORMAL				3
+
+/* struct iscsi_seq->status */
+#define DATAOUT_SEQUENCE_GOT_R2T		1
+#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2
+#define DATAOUT_SEQUENCE_COMPLETE		3
+
+/* iscsit_determine_counts_for_list() type */
+#define PDULIST_NORMAL				1
+#define PDULIST_IMMEDIATE			2
+#define PDULIST_UNSOLICITED			3
+#define PDULIST_IMMEDIATE_AND_UNSOLICITED	4
+
+/* struct iscsi_pdu->type */
+#define PDUTYPE_IMMEDIATE			1
+#define PDUTYPE_UNSOLICITED			2
+#define PDUTYPE_NORMAL				3
+
+/* struct iscsi_pdu->status */
+#define ISCSI_PDU_NOT_RECEIVED			0
+#define ISCSI_PDU_RECEIVED_OK			1
+#define ISCSI_PDU_CRC_FAILED			2
+#define ISCSI_PDU_TIMED_OUT			3
+
+/* struct iscsi_build_list->randomize */
+#define RANDOM_DATAIN_PDU_OFFSETS		0x01
+#define RANDOM_DATAIN_SEQ_OFFSETS		0x02
+#define RANDOM_DATAOUT_PDU_OFFSETS		0x04
+#define RANDOM_R2T_OFFSETS			0x08
+
+/* struct iscsi_build_list->data_direction */
+#define ISCSI_PDU_READ				0x01
+#define ISCSI_PDU_WRITE				0x02
+
+struct iscsi_build_list {
+	int		data_direction;
+	int		randomize;
+	int		type;
+	int		immediate_data_length;
+};
+
+struct iscsi_pdu {
+	int		status;
+	int		type;
+	u8		flags;
+	u32		data_sn;
+	u32		length;
+	u32		offset;
+	u32		pdu_send_order;
+	u32		seq_no;
+} ____cacheline_aligned;
+
+struct iscsi_seq {
+	int		sent;
+	int		status;
+	int		type;
+	u32		data_sn;
+	u32		first_datasn;
+	u32		last_datasn;
+	u32		next_burst_len;
+	u32		pdu_start;
+	u32		pdu_count;
+	u32		offset;
+	u32		orig_offset;
+	u32		pdu_send_order;
+	u32		r2t_sn;
+	u32		seq_send_order;
+	u32		seq_no;
+	u32		xfer_len;
+} ____cacheline_aligned;
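+
+/*
+ * Relationship sketch: each struct iscsi_seq describes one burst and
+ * covers the pdu_count entries of the command's pdu_list starting at
+ * index pdu_start, i.e. pdu_list[pdu_start .. pdu_start + pdu_count - 1].
+ * seq_send_order and pdu_send_order carry the (possibly randomized)
+ * transmission order built in iscsi_target_seq_pdu_list.c.
+ */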
+
+extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
+extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
+
+#endif /* ISCSI_SEQ_AND_PDU_LIST_H */
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
new file mode 100644
index 0000000..411cb26
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -0,0 +1,807 @@
+/*******************************************************************************
+ * Modern ConfigFS group-context-specific iSCSI statistics, based on the
+ * original iscsi_target_mib.c code
+ *
+ * Copyright (c) 2011-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/configfs.h>
+#include <linux/export.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include <target/iscsi/iscsi_target_stat.h>
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
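+/* This fallback mirrors the INITIAL_JIFFIES definition in <linux/jiffies.h>. */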
+
+/* Instance Attributes Table */
+#define ISCSI_INST_NUM_NODES		1
+#define ISCSI_INST_DESCR		"Storage Engine Target"
+#define ISCSI_INST_LAST_FAILURE_TYPE	0
+#define ISCSI_DISCONTINUITY_TIME	0
+
+#define ISCSI_NODE_INDEX		1
+
+#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
+
+/****************************************************************************
+ * iSCSI MIB Tables
+ ****************************************************************************/
+/*
+ * Instance Attributes Table
+ */
+static struct iscsi_tiqn *iscsi_instance_tiqn(struct config_item *item)
+{
+	struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+			struct iscsi_wwn_stat_grps, iscsi_instance_group);
+	return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
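+
+/*
+ * The lookup helper above (and its siblings for the other stat tables)
+ * recovers the owning tiqn in two steps: container_of() from the
+ * config_item to the iscsi_wwn_stat_grps member, then container_of()
+ * from that member to the enclosing struct iscsi_tiqn.
+ */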
+
+static ssize_t iscsi_stat_instance_inst_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			iscsi_instance_tiqn(item)->tiqn_index);
+}
+
+static ssize_t iscsi_stat_instance_min_ver_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+
+static ssize_t iscsi_stat_instance_max_ver_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+
+static ssize_t iscsi_stat_instance_portals_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			iscsi_instance_tiqn(item)->tiqn_num_tpg_nps);
+}
+
+static ssize_t iscsi_stat_instance_nodes_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
+}
+
+static ssize_t iscsi_stat_instance_sessions_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n",
+		iscsi_instance_tiqn(item)->tiqn_nsessions);
+}
+
+static ssize_t iscsi_stat_instance_fail_sess_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+	u32 sess_err_count;
+
+	spin_lock_bh(&sess_err->lock);
+	sess_err_count = (sess_err->digest_errors +
+			  sess_err->cxn_timeout_errors +
+			  sess_err->pdu_format_errors);
+	spin_unlock_bh(&sess_err->lock);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
+}
+
+static ssize_t iscsi_stat_instance_fail_type_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			sess_err->last_sess_failure_type);
+}
+
+static ssize_t iscsi_stat_instance_fail_rem_name_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			sess_err->last_sess_fail_rem_name[0] ?
+			sess_err->last_sess_fail_rem_name : NONE);
+}
+
+static ssize_t iscsi_stat_instance_disc_time_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
+}
+
+static ssize_t iscsi_stat_instance_description_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
+}
+
+static ssize_t iscsi_stat_instance_vendor_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
+}
+
+static ssize_t iscsi_stat_instance_version_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
+}
+
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, min_ver);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, max_ver);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, portals);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, nodes);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, sessions);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_sess);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_type);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_rem_name);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, disc_time);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, description);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, vendor);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, version);
+
+static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
+	&iscsi_stat_instance_attr_inst,
+	&iscsi_stat_instance_attr_min_ver,
+	&iscsi_stat_instance_attr_max_ver,
+	&iscsi_stat_instance_attr_portals,
+	&iscsi_stat_instance_attr_nodes,
+	&iscsi_stat_instance_attr_sessions,
+	&iscsi_stat_instance_attr_fail_sess,
+	&iscsi_stat_instance_attr_fail_type,
+	&iscsi_stat_instance_attr_fail_rem_name,
+	&iscsi_stat_instance_attr_disc_time,
+	&iscsi_stat_instance_attr_description,
+	&iscsi_stat_instance_attr_vendor,
+	&iscsi_stat_instance_attr_version,
+	NULL,
+};
+
+struct config_item_type iscsi_stat_instance_cit = {
+	.ct_attrs		= iscsi_stat_instance_attrs,
+	.ct_owner		= THIS_MODULE,
+};
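+
+/*
+ * Illustrative layout (hypothetical path, assuming the usual configfs
+ * mount point): the read-only attributes above surface as files under a
+ * directory along the lines of
+ *
+ *	/sys/kernel/config/target/iscsi/<tiqn>/fabric_statistics/iscsi_instance/
+ *
+ * and can be read with plain cat(1).
+ */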
+
+/*
+ * Instance Session Failure Stats Table
+ */
+static struct iscsi_tiqn *iscsi_sess_err_tiqn(struct config_item *item)
+{
+	struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+			struct iscsi_wwn_stat_grps, iscsi_sess_err_group);
+	return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
+
+static ssize_t iscsi_stat_sess_err_inst_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n",
+		iscsi_sess_err_tiqn(item)->tiqn_index);
+}
+
+static ssize_t iscsi_stat_sess_err_digest_errors_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
+}
+
+static ssize_t iscsi_stat_sess_err_cxn_errors_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
+}
+
+static ssize_t iscsi_stat_sess_err_format_errors_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
+}
+
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, digest_errors);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, cxn_errors);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, format_errors);
+
+static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
+	&iscsi_stat_sess_err_attr_inst,
+	&iscsi_stat_sess_err_attr_digest_errors,
+	&iscsi_stat_sess_err_attr_cxn_errors,
+	&iscsi_stat_sess_err_attr_format_errors,
+	NULL,
+};
+
+struct config_item_type iscsi_stat_sess_err_cit = {
+	.ct_attrs		= iscsi_stat_sess_err_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Target Attributes Table
+ */
+static struct iscsi_tiqn *iscsi_tgt_attr_tiqn(struct config_item *item)
+{
+	struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+			struct iscsi_wwn_stat_grps, iscsi_tgt_attr_group);
+	return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
+
+static ssize_t iscsi_stat_tgt_attr_inst_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			iscsi_tgt_attr_tiqn(item)->tiqn_index);
+}
+
+static ssize_t iscsi_stat_tgt_attr_indx_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+
+static ssize_t iscsi_stat_tgt_attr_login_fails_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	u32 fail_count;
+
+	spin_lock(&lstat->lock);
+	fail_count = (lstat->redirects + lstat->authorize_fails +
+			lstat->authenticate_fails + lstat->negotiate_fails +
+			lstat->other_fails);
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
+}
+
+static ssize_t iscsi_stat_tgt_attr_last_fail_time_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	u32 last_fail_time;
+
+	spin_lock(&lstat->lock);
+	last_fail_time = lstat->last_fail_time ?
+			(u32)(((u32)lstat->last_fail_time -
+				INITIAL_JIFFIES) * 100 / HZ) : 0;
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
+}
+
+static ssize_t iscsi_stat_tgt_attr_last_fail_type_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	u32 last_fail_type;
+
+	spin_lock(&lstat->lock);
+	last_fail_type = lstat->last_fail_type;
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
+}
+
+static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	unsigned char buf[224];
+
+	spin_lock(&lstat->lock);
+	snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+				lstat->last_intr_fail_name : NONE);
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+
+static ssize_t iscsi_stat_tgt_attr_fail_intr_addr_type_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	int ret;
+
+	spin_lock(&lstat->lock);
+	if (lstat->last_intr_fail_ip_family == AF_INET6)
+		ret = snprintf(page, PAGE_SIZE, "ipv6\n");
+	else
+		ret = snprintf(page, PAGE_SIZE, "ipv4\n");
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_tgt_attr_fail_intr_addr_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	int ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, indx);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, login_fails);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, last_fail_time);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, last_fail_type);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_name);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_addr_type);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_addr);
+
+static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
+	&iscsi_stat_tgt_attr_attr_inst,
+	&iscsi_stat_tgt_attr_attr_indx,
+	&iscsi_stat_tgt_attr_attr_login_fails,
+	&iscsi_stat_tgt_attr_attr_last_fail_time,
+	&iscsi_stat_tgt_attr_attr_last_fail_type,
+	&iscsi_stat_tgt_attr_attr_fail_intr_name,
+	&iscsi_stat_tgt_attr_attr_fail_intr_addr_type,
+	&iscsi_stat_tgt_attr_attr_fail_intr_addr,
+	NULL,
+};
+
+struct config_item_type iscsi_stat_tgt_attr_cit = {
+	.ct_attrs		= iscsi_stat_tgt_attr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Target Login Stats Table
+ */
+static struct iscsi_tiqn *iscsi_login_stat_tiqn(struct config_item *item)
+{
+	struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+			struct iscsi_wwn_stat_grps, iscsi_login_stats_group);
+	return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
+
+static ssize_t iscsi_stat_login_inst_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n",
+		iscsi_login_stat_tiqn(item)->tiqn_index);
+}
+
+static ssize_t iscsi_stat_login_indx_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+
+static ssize_t iscsi_stat_login_accepts_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_login_other_fails_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_login_redirects_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_login_authorize_fails_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_login_authenticate_fails_show(
+		struct config_item *item, char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_login_negotiate_fails_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(iscsi_stat_login_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, indx);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, accepts);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, other_fails);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, redirects);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, authorize_fails);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, authenticate_fails);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, negotiate_fails);
+
+static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
+	&iscsi_stat_login_attr_inst,
+	&iscsi_stat_login_attr_indx,
+	&iscsi_stat_login_attr_accepts,
+	&iscsi_stat_login_attr_other_fails,
+	&iscsi_stat_login_attr_redirects,
+	&iscsi_stat_login_attr_authorize_fails,
+	&iscsi_stat_login_attr_authenticate_fails,
+	&iscsi_stat_login_attr_negotiate_fails,
+	NULL,
+};
+
+struct config_item_type iscsi_stat_login_cit = {
+	.ct_attrs		= iscsi_stat_login_stats_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Target Logout Stats Table
+ */
+static struct iscsi_tiqn *iscsi_logout_stat_tiqn(struct config_item *item)
+{
+	struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+			struct iscsi_wwn_stat_grps, iscsi_logout_stats_group);
+	return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
+
+static ssize_t iscsi_stat_logout_inst_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n",
+		iscsi_logout_stat_tiqn(item)->tiqn_index);
+}
+
+static ssize_t iscsi_stat_logout_indx_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+
+static ssize_t iscsi_stat_logout_normal_logouts_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_logout_stat_tiqn(item);
+	struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
+}
+
+static ssize_t iscsi_stat_logout_abnormal_logouts_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_tiqn *tiqn = iscsi_logout_stat_tiqn(item);
+	struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
+}
+
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, indx);
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, normal_logouts);
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, abnormal_logouts);
+
+static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
+	&iscsi_stat_logout_attr_inst,
+	&iscsi_stat_logout_attr_indx,
+	&iscsi_stat_logout_attr_normal_logouts,
+	&iscsi_stat_logout_attr_abnormal_logouts,
+	NULL,
+};
+
+struct config_item_type iscsi_stat_logout_cit = {
+	.ct_attrs		= iscsi_stat_logout_stats_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Session Stats Table
+ */
+static struct iscsi_node_acl *iscsi_stat_nacl(struct config_item *item)
+{
+	struct iscsi_node_stat_grps *igrps = container_of(to_config_group(item),
+			struct iscsi_node_stat_grps, iscsi_sess_stats_group);
+	return container_of(igrps, struct iscsi_node_acl, node_stat_grps);
+}
+
+static ssize_t iscsi_stat_sess_inst_show(struct config_item *item, char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
+	struct iscsi_tiqn *tiqn = container_of(wwn,
+			struct iscsi_tiqn, tiqn_wwn);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+
+static ssize_t iscsi_stat_sess_node_show(struct config_item *item, char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%u\n",
+				sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
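+
+/*
+ * The locking pattern above repeats for every per-session counter below:
+ * nacl_sess_lock pins the active session (if any) while its value is
+ * formatted, so a concurrent logout cannot free the session mid-read.
+ */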
+
+static ssize_t iscsi_stat_sess_indx_show(struct config_item *item, char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%u\n",
+					sess->session_index);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_sess_cmd_pdus_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%lu\n",
+				       atomic_long_read(&sess->cmd_pdus));
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_sess_rsp_pdus_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%lu\n",
+				       atomic_long_read(&sess->rsp_pdus));
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_sess_txdata_octs_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%lu\n",
+				       atomic_long_read(&sess->tx_data_octets));
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_sess_rxdata_octs_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%lu\n",
+				       atomic_long_read(&sess->rx_data_octets));
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_sess_conn_digest_errors_show(struct config_item *item,
+		char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%lu\n",
+				       atomic_long_read(&sess->conn_digest_errors));
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+
+static ssize_t iscsi_stat_sess_conn_timeout_errors_show(
+		struct config_item *item, char *page)
+{
+	struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%lu\n",
+				       atomic_long_read(&sess->conn_timeout_errors));
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, node);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, indx);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, cmd_pdus);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, rsp_pdus);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, txdata_octs);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, rxdata_octs);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, conn_digest_errors);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, conn_timeout_errors);
+
+static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
+	&iscsi_stat_sess_attr_inst,
+	&iscsi_stat_sess_attr_node,
+	&iscsi_stat_sess_attr_indx,
+	&iscsi_stat_sess_attr_cmd_pdus,
+	&iscsi_stat_sess_attr_rsp_pdus,
+	&iscsi_stat_sess_attr_txdata_octs,
+	&iscsi_stat_sess_attr_rxdata_octs,
+	&iscsi_stat_sess_attr_conn_digest_errors,
+	&iscsi_stat_sess_attr_conn_timeout_errors,
+	NULL,
+};
+
+struct config_item_type iscsi_stat_sess_cit = {
+	.ct_attrs		= iscsi_stat_sess_stats_attrs,
+	.ct_owner		= THIS_MODULE,
+};
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
new file mode 100644
index 0000000..11320df
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -0,0 +1,849 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific Task Management functions.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <asm/unaligned.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+u8 iscsit_tmr_abort_task(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_cmd *ref_cmd;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+	struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+
+	ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
+	if (!ref_cmd) {
+		pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
+			" %hu.\n", hdr->rtt, conn->cid);
+		return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
+			iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), (u32) atomic_read(&conn->sess->max_cmd_sn))) ?
+			ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
+	}
+	if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
+		pr_err("RefCmdSN 0x%08x does not equal"
+			" task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
+			hdr->refcmdsn, ref_cmd->cmd_sn);
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+
+	se_tmr->ref_task_tag		= (__force u32)hdr->rtt;
+	tmr_req->ref_cmd		= ref_cmd;
+	tmr_req->exp_data_sn		= be32_to_cpu(hdr->exp_datasn);
+
+	return ISCSI_TMF_RSP_COMPLETE;
+}
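+
+/*
+ * Note on the not-found case above: following RFC 3720 section 10.6.1,
+ * an ABORT TASK whose RefCmdSN still falls inside the valid CmdSN window
+ * is answered with "Function complete" even though no task was located,
+ * while one outside the window gets "Task does not exist".  The
+ * iscsi_sna_gte()/iscsi_sna_lte() helpers perform the RFC 1982 style
+ * serial number comparison.
+ */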
+
+/*
+ *	Called from iscsit_handle_task_mgt_cmd().
+ */
+int iscsit_tmr_task_warm_reset(
+	struct iscsi_conn *conn,
+	struct iscsi_tmr_req *tmr_req,
+	unsigned char *buf)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	if (!na->tmr_warm_reset) {
+		pr_err("TMR Opcode TARGET_WARM_RESET authorization"
+			" failed for Initiator Node: %s\n",
+			sess->se_sess->se_node_acl->initiatorname);
+		return -1;
+	}
+	/*
+	 * Do the real work in transport_generic_do_tmr().
+	 */
+	return 0;
+}
+
+int iscsit_tmr_task_cold_reset(
+	struct iscsi_conn *conn,
+	struct iscsi_tmr_req *tmr_req,
+	unsigned char *buf)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	if (!na->tmr_cold_reset) {
+		pr_err("TMR Opcode TARGET_COLD_RESET authorization"
+			" failed for Initiator Node: %s\n",
+			sess->se_sess->se_node_acl->initiatorname);
+		return -1;
+	}
+	/*
+	 * Do the real work in transport_generic_do_tmr().
+	 */
+	return 0;
+}
+
+u8 iscsit_tmr_task_reassign(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_cmd *ref_cmd = NULL;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_conn_recovery *cr = NULL;
+	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+	struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+	u64 ret, ref_lun;
+
+	pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
+		" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
+		hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
+
+	if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
+		pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
+				" ignoring request.\n");
+		return ISCSI_TMF_RSP_NOT_SUPPORTED;
+	}
+
+	ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
+	if (ret == -2) {
+		pr_err("Command ITT: 0x%08x is still allegiant to CID:"
+			" %hu\n", ref_cmd->init_task_tag, cr->cid);
+		return ISCSI_TMF_RSP_TASK_ALLEGIANT;
+	} else if (ret == -1) {
+		pr_err("Unable to locate RefTaskTag: 0x%08x in"
+			" connection recovery command list.\n", hdr->rtt);
+		return ISCSI_TMF_RSP_NO_TASK;
+	}
+	/*
+	 * Temporary check to prevent connection recovery for
+	 * connections with a differing Max*DataSegmentLength.
+	 */
+	if (cr->maxrecvdatasegmentlength !=
+	    conn->conn_ops->MaxRecvDataSegmentLength) {
+		pr_err("Unable to perform connection recovery for"
+			" differing MaxRecvDataSegmentLength, rejecting"
+			" TMR TASK_REASSIGN.\n");
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+	if (cr->maxxmitdatasegmentlength !=
+	    conn->conn_ops->MaxXmitDataSegmentLength) {
+		pr_err("Unable to perform connection recovery for"
+			" differing MaxXmitDataSegmentLength, rejecting"
+			" TMR TASK_REASSIGN.\n");
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+
+	ref_lun = scsilun_to_int(&hdr->lun);
+	if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
+		pr_err("Unable to perform connection recovery for"
+			" differing ref_lun: %llu ref_cmd orig_fe_lun: %llu\n",
+			ref_lun, ref_cmd->se_cmd.orig_fe_lun);
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+
+	se_tmr->ref_task_tag		= (__force u32)hdr->rtt;
+	tmr_req->ref_cmd		= ref_cmd;
+	tmr_req->exp_data_sn		= be32_to_cpu(hdr->exp_datasn);
+	tmr_req->conn_recovery		= cr;
+	tmr_req->task_reassign		= 1;
+	/*
+	 * Command can now be reassigned to a new connection.
+	 * The task management response must be sent before the
+	 * reassignment actually happens.  See iscsit_tmr_post_handler().
+	 */
+	return ISCSI_TMF_RSP_COMPLETE;
+}
+
+static void iscsit_task_reassign_remove_cmd(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn_recovery *cr,
+	struct iscsi_session *sess)
+{
+	int ret;
+
+	spin_lock(&cr->conn_recovery_cmd_lock);
+	ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+	spin_unlock(&cr->conn_recovery_cmd_lock);
+	if (!ret) {
+		pr_debug("iSCSI connection recovery successful for CID:"
+			" %hu on SID: %u\n", cr->cid, sess->sid);
+		iscsit_remove_active_connection_recovery_entry(cr, sess);
+	}
+}
+
+static int iscsit_task_reassign_complete_nop_out(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+	struct iscsi_conn_recovery *cr;
+
+	if (!cmd->cr) {
+		pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+			" is NULL!\n", cmd->init_task_tag);
+		return -1;
+	}
+	cr = cmd->cr;
+
+	/*
+	 * Reset the StatSN so a new one for this command's new connection
+	 * will be assigned.
+	 * Reset the ExpStatSN as well so we may receive Status SNACKs.
+	 */
+	cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+	iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	cmd->i_state = ISTATE_SEND_NOPIN;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	return 0;
+}
+
+static int iscsit_task_reassign_complete_write(
+	struct iscsi_cmd *cmd,
+	struct iscsi_tmr_req *tmr_req)
+{
+	int no_build_r2ts = 0;
+	u32 length = 0, offset = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	/*
+	 * The Initiator must not send an R2T SNACK with a BegRun less than
+	 * the TMR TASK_REASSIGN's ExpDataSN.
+	 */
+	if (!tmr_req->exp_data_sn) {
+		cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+		cmd->acked_data_sn = 0;
+	} else {
+		cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+	}
+
+	/*
+	 * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
+	 * Initiator is expecting.  The Target controls all WRITE operations
+	 * so if we have received all DataOUT we can safely ignore the Initiator.
+	 */
+	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+		if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
+			pr_debug("WRITE ITT: 0x%08x: t_state: %d"
+				" never sent to transport\n",
+				cmd->init_task_tag, cmd->se_cmd.t_state);
+			target_execute_cmd(se_cmd);
+			return 0;
+		}
+
+		cmd->i_state = ISTATE_SEND_STATUS;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		return 0;
+	}
+
+	/*
+	 * Special case to deal with DataSequenceInOrder=No and Non-Immediate
+	 * Unsolicited DataOut.
+	 */
+	if (cmd->unsolicited_data) {
+		cmd->unsolicited_data = 0;
+
+		offset = cmd->next_burst_len = cmd->write_data_done;
+
+		if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
+		     cmd->se_cmd.data_length) {
+			no_build_r2ts = 1;
+			length = (cmd->se_cmd.data_length - offset);
+		} else
+			length = (conn->sess->sess_ops->FirstBurstLength - offset);
+
+		spin_lock_bh(&cmd->r2t_lock);
+		if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return -1;
+		}
+		cmd->outstanding_r2ts++;
+		spin_unlock_bh(&cmd->r2t_lock);
+
+		if (no_build_r2ts)
+			return 0;
+	}
+	/*
+	 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
+	 */
+	return conn->conn_transport->iscsit_get_dataout(conn, cmd, true);
+}
+
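+/*
+ * Worked example (illustrative numbers) for the unsolicited-data rewind
+ * above: with FirstBurstLength = 64k and write_data_done = 16k at the time
+ * the connection failed, a command with data_length = 32k satisfies
+ * (64k - 16k) >= 32k, so a single recovery R2T for offset 16k, length 16k
+ * finishes the transfer and no further R2Ts are built.  With
+ * data_length = 128k, the recovery R2T covers only the remaining first
+ * burst (48k) and iscsit_get_dataout() builds the rest.
+ */
+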
+static int iscsit_task_reassign_complete_read(
+	struct iscsi_cmd *cmd,
+	struct iscsi_tmr_req *tmr_req)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	/*
+	 * The Initiator must not send a Data SNACK with a BegRun less than
+	 * the TMR TASK_REASSIGN's ExpDataSN.
+	 */
+	if (!tmr_req->exp_data_sn) {
+		cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+		cmd->acked_data_sn = 0;
+	} else {
+		cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+	}
+
+	if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
+		pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
+			" transport\n", cmd->init_task_tag,
+			cmd->se_cmd.t_state);
+		transport_handle_cdb_direct(se_cmd);
+		return 0;
+	}
+
+	if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
+		pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
+			" from transport\n", cmd->init_task_tag,
+			cmd->se_cmd.t_state);
+		return -1;
+	}
+
+	dr = iscsit_allocate_datain_req();
+	if (!dr)
+		return -1;
+	/*
+	 * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
+	 * Initiator is expecting.
+	 */
+	dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
+	dr->runlength = 0;
+	dr->generate_recovery_values = 1;
+	dr->recovery = DATAIN_CONNECTION_RECOVERY;
+
+	iscsit_attach_datain_req(cmd, dr);
+
+	cmd->i_state = ISTATE_SEND_DATAIN;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	return 0;
+}
+
+static int iscsit_task_reassign_complete_none(
+	struct iscsi_cmd *cmd,
+	struct iscsi_tmr_req *tmr_req)
+{
+	struct iscsi_conn *conn = cmd->conn;
+
+	cmd->i_state = ISTATE_SEND_STATUS;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	return 0;
+}
+
+static int iscsit_task_reassign_complete_scsi_cmnd(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+	struct iscsi_conn_recovery *cr;
+
+	if (!cmd->cr) {
+		pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+			" is NULL!\n", cmd->init_task_tag);
+		return -1;
+	}
+	cr = cmd->cr;
+
+	/*
+	 * Reset the StatSN so a new one for this command's new connection
+	 * will be assigned.
+	 * Reset the ExpStatSN as well so we may receive Status SNACKs.
+	 */
+	cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+	iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		cmd->i_state = ISTATE_SEND_STATUS;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		return 0;
+	}
+
+	switch (cmd->data_direction) {
+	case DMA_TO_DEVICE:
+		return iscsit_task_reassign_complete_write(cmd, tmr_req);
+	case DMA_FROM_DEVICE:
+		return iscsit_task_reassign_complete_read(cmd, tmr_req);
+	case DMA_NONE:
+		return iscsit_task_reassign_complete_none(cmd, tmr_req);
+	default:
+		pr_err("Unknown cmd->data_direction: 0x%02x\n",
+				cmd->data_direction);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iscsit_task_reassign_complete(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd;
+	int ret = 0;
+
+	if (!tmr_req->ref_cmd) {
+		pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
+		return -1;
+	}
+	cmd = tmr_req->ref_cmd;
+
+	cmd->conn = conn;
+
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_NOOP_OUT:
+		ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
+		break;
+	case ISCSI_OP_SCSI_CMD:
+		ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
+		break;
+	default:
+		pr_err("Illegal iSCSI Opcode 0x%02x during"
+			" command reallegiance\n", cmd->iscsi_opcode);
+		return -1;
+	}
+
+	if (ret != 0)
+		return ret;
+
+	pr_debug("Completed connection reallegiance for Opcode: 0x%02x,"
+		" ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
+			cmd->init_task_tag, conn->cid);
+
+	return 0;
+}
+
+/*
+ *	Handles special after-the-fact actions related to TMRs.
+ *	Right now the only one it is really needed for is the
+ *	connection recovery related TASK_REASSIGN.
+ */
+int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+
+	if (tmr_req->task_reassign &&
+	   (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+		return iscsit_task_reassign_complete(tmr_req, conn);
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_tmr_post_handler);
+
+/*
+ *	Nothing to do here, but leave it for good measure. :-)
+ */
+static int iscsit_task_reassign_prepare_read(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	return 0;
+}
+
+static void iscsit_task_reassign_prepare_unsolicited_dataout(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	int i, j;
+	struct iscsi_pdu *pdu = NULL;
+	struct iscsi_seq *seq = NULL;
+
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		cmd->data_sn = 0;
+
+		if (cmd->immediate_data)
+			cmd->r2t_offset += (cmd->first_burst_len -
+				cmd->seq_start_offset);
+
+		if (conn->sess->sess_ops->DataPDUInOrder) {
+			cmd->write_data_done -= (cmd->immediate_data) ?
+						(cmd->first_burst_len -
+						 cmd->seq_start_offset) :
+						 cmd->first_burst_len;
+			cmd->first_burst_len = 0;
+			return;
+		}
+
+		for (i = 0; i < cmd->pdu_count; i++) {
+			pdu = &cmd->pdu_list[i];
+
+			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+				continue;
+
+			if ((pdu->offset >= cmd->seq_start_offset) &&
+			   ((pdu->offset + pdu->length) <=
+			     cmd->seq_end_offset)) {
+				cmd->first_burst_len -= pdu->length;
+				cmd->write_data_done -= pdu->length;
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+	} else {
+		for (i = 0; i < cmd->seq_count; i++) {
+			seq = &cmd->seq_list[i];
+
+			if (seq->type != SEQTYPE_UNSOLICITED)
+				continue;
+
+			cmd->write_data_done -=
+					(seq->offset - seq->orig_offset);
+			cmd->first_burst_len = 0;
+			seq->data_sn = 0;
+			seq->offset = seq->orig_offset;
+			seq->next_burst_len = 0;
+			seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+			if (conn->sess->sess_ops->DataPDUInOrder)
+				continue;
+
+			for (j = 0; j < seq->pdu_count; j++) {
+				pdu = &cmd->pdu_list[j+seq->pdu_start];
+
+				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+					continue;
+
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+	}
+}
+
+static int iscsit_task_reassign_prepare_write(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+	struct iscsi_pdu *pdu = NULL;
+	struct iscsi_r2t *r2t = NULL, *r2t_tmp;
+	int first_incomplete_r2t = 1, i = 0;
+
+	/*
+	 * The command was in the process of receiving Unsolicited DataOUT when
+	 * the connection failed.
+	 */
+	if (cmd->unsolicited_data)
+		iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
+
+	/*
+	 * The Initiator is requesting R2Ts starting from R2TSN zero, so skip
+	 * checking acknowledged R2Ts and go straight to dropping the
+	 * unacknowledged ones.
+	 */
+	if (!tmr_req->exp_data_sn)
+		goto drop_unacknowledged_r2ts;
+
+	/*
+	 * We now check that the PDUs in DataOUT sequences below
+	 * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
+	 * expecting next) have all the DataOUT they require to complete
+	 * the DataOUT sequence.  First scan from R2TSN 0 to TMR
+	 * TASK_REASSIGN ExpDataSN-1.
+	 *
+	 * If we have not received all DataOUT in question,  we must
+	 * make sure to make the appropriate changes to values in
+	 * struct iscsi_cmd (and elsewhere depending on session parameters)
+	 * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
+	 * will resend a new R2T for the DataOUT sequences in question.
+	 */
+	spin_lock_bh(&cmd->r2t_lock);
+	if (list_empty(&cmd->cmd_r2t_list)) {
+		spin_unlock_bh(&cmd->r2t_lock);
+		return -1;
+	}
+
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+
+		if (r2t->r2t_sn >= tmr_req->exp_data_sn)
+			continue;
+		/*
+		 * Safely ignore Recovery R2Ts and R2Ts that have completed
+		 * DataOUT sequences.
+		 */
+		if (r2t->seq_complete)
+			continue;
+
+		if (r2t->recovery_r2t)
+			continue;
+
+		/*
+		 *                 DataSequenceInOrder=Yes:
+		 *
+		 * Taking into account the iSCSI implementation requirement of
+		 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+		 * DataSequenceInOrder=Yes, we must take into consideration
+		 * the following:
+		 *
+		 *                  DataSequenceInOrder=No:
+		 *
+		 * Taking into account that the Initiator controls the (possibly
+		 * random) PDU Order in (possibly random) Sequence Order of
+		 * DataOUT the target requests with R2Ts,  we must take into
+		 * consideration the following:
+		 *
+		 *      DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
+		 *
+		 * While processing non-complete R2T DataOUT sequence requests
+		 * the Target will re-request only the total sequence length
+		 * minus current received offset.  This is because we must
+		 * assume the initiator will continue sending DataOUT from the
+		 * last PDU before the connection failed.
+		 *
+		 *      DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
+		 *
+		 * While processing non-complete R2T DataOUT sequence requests
+		 * the Target will re-request the entire DataOUT sequence if
+		 * any single PDU is missing from the sequence.  This is because
+		 * we have no logical method to determine the next PDU offset,
+		 * and we must assume the Initiator will be sending any random
+		 * PDU offset in the current sequence after TASK_REASSIGN
+		 * has completed.
+		 */
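+		/*
+		 * Worked example (illustrative numbers): with
+		 * DataPDUInOrder=Yes and an incomplete R2T of xfer_len 64k of
+		 * which next_burst_len records 16k received, r2t_offset is
+		 * rolled back by 48k so the rebuilt R2T re-requests only the
+		 * missing tail.  With DataPDUInOrder=No, r2t_offset is rolled
+		 * back by the full 64k and every received PDU inside the R2T
+		 * window is marked ISCSI_PDU_NOT_RECEIVED, re-requesting the
+		 * whole sequence.
+		 */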
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if (!first_incomplete_r2t) {
+				cmd->r2t_offset -= r2t->xfer_len;
+				goto next;
+			}
+
+			if (conn->sess->sess_ops->DataPDUInOrder) {
+				cmd->data_sn = 0;
+				cmd->r2t_offset -= (r2t->xfer_len -
+					cmd->next_burst_len);
+				first_incomplete_r2t = 0;
+				goto next;
+			}
+
+			cmd->data_sn = 0;
+			cmd->r2t_offset -= r2t->xfer_len;
+
+			for (i = 0; i < cmd->pdu_count; i++) {
+				pdu = &cmd->pdu_list[i];
+
+				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+					continue;
+
+				if ((pdu->offset >= r2t->offset) &&
+				    (pdu->offset < (r2t->offset +
+						r2t->xfer_len))) {
+					cmd->next_burst_len -= pdu->length;
+					cmd->write_data_done -= pdu->length;
+					pdu->status = ISCSI_PDU_NOT_RECEIVED;
+				}
+			}
+
+			first_incomplete_r2t = 0;
+		} else {
+			struct iscsi_seq *seq;
+
+			seq = iscsit_get_seq_holder(cmd, r2t->offset,
+					r2t->xfer_len);
+			if (!seq) {
+				spin_unlock_bh(&cmd->r2t_lock);
+				return -1;
+			}
+
+			cmd->write_data_done -=
+					(seq->offset - seq->orig_offset);
+			seq->data_sn = 0;
+			seq->offset = seq->orig_offset;
+			seq->next_burst_len = 0;
+			seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+			cmd->seq_send_order--;
+
+			if (conn->sess->sess_ops->DataPDUInOrder)
+				goto next;
+
+			for (i = 0; i < seq->pdu_count; i++) {
+				pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+					continue;
+
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+
+next:
+		cmd->outstanding_r2ts--;
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	/*
+	 * We now drop all unacknowledged R2Ts, i.e. from the ExpDataSN in the
+	 * TMR TASK_REASSIGN to the last R2T in the list.  We are also careful
+	 * to check that the Initiator is not requesting R2Ts for DataOUT
+	 * sequences it has already completed.
+	 *
+	 * Free each R2T in question and adjust values in struct iscsi_cmd
+	 * accordingly so iscsit_build_r2ts_for_cmd() does the rest of
+	 * the work after the TMR TASK_REASSIGN Response is sent.
+	 */
+drop_unacknowledged_r2ts:
+
+	cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
+	cmd->r2t_sn = tmr_req->exp_data_sn;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
+		/*
+		 * Skip up to the R2T Sequence number provided by the
+		 * iSCSI TASK_REASSIGN TMR
+		 */
+		if (r2t->r2t_sn < tmr_req->exp_data_sn)
+			continue;
+
+		if (r2t->seq_complete) {
+			pr_err("Initiator is requesting R2Ts from"
+				" R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
+				" Length: %u is already complete."
+				"   BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
+				tmr_req->exp_data_sn, r2t->r2t_sn,
+				r2t->offset, r2t->xfer_len);
+			spin_unlock_bh(&cmd->r2t_lock);
+			return -1;
+		}
+
+		if (r2t->recovery_r2t) {
+			iscsit_free_r2t(r2t, cmd);
+			continue;
+		}
+
+		/*		   DataSequenceInOrder=Yes:
+		 *
+		 * Taking into account the iSCSI implementation requirement of
+		 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+		 * DataSequenceInOrder=Yes, it's safe to subtract the R2T's
+		 * entire transfer length from the command's R2T offset marker.
+		 *
+		 *		   DataSequenceInOrder=No:
+		 *
+		 * We subtract the struct iscsi_seq difference between the
+		 * current offset and the original offset from cmd->write_data_done
+		 * to account for DataOUT PDUs already received.  Then reset
+		 * the current offset to the original and zero out the current
+		 * burst length,  to make sure we re-request the entire DataOUT
+		 * sequence.
+		 */
+		if (conn->sess->sess_ops->DataSequenceInOrder)
+			cmd->r2t_offset -= r2t->xfer_len;
+		else
+			cmd->seq_send_order--;
+
+		cmd->outstanding_r2ts--;
+		iscsit_free_r2t(r2t, cmd);
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return 0;
+}
+
+/*
+ *	Performs sanity checks on a TMR TASK_REASSIGN's ExpDataSN for
+ *	a given struct iscsi_cmd.
+ */
+int iscsit_check_task_reassign_expdatasn(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;
+
+	if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
+		return 0;
+
+	if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+		return 0;
+
+	if (ref_cmd->data_direction == DMA_NONE)
+		return 0;
+
+	/*
+	 * For READs the TMR TASK_REASSIGN's ExpDataSN contains the next DataSN
+	 * of DataIN the Initiator is expecting.
+	 *
+	 * Also check that the Initiator is not re-requesting DataIN that has
+	 * already been acknowledged with a DataAck SNACK.
+	 */
+	if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
+		if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
+			pr_err("Received ExpDataSN: 0x%08x for READ"
+				" in TMR TASK_REASSIGN greater than command's"
+				" DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
+				ref_cmd->data_sn);
+			return -1;
+		}
+		if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+		    (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
+			pr_err("Received ExpDataSN: 0x%08x for READ"
+				" in TMR TASK_REASSIGN for previously"
+				" acknowledged DataIN: 0x%08x,"
+				" protocol error\n", tmr_req->exp_data_sn,
+				ref_cmd->acked_data_sn);
+			return -1;
+		}
+		return iscsit_task_reassign_prepare_read(tmr_req, conn);
+	}
+
+	/*
+	 * For WRITEs the TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN
+	 * for R2Ts the Initiator is expecting.
+	 *
+	 * Do the magic in iscsit_task_reassign_prepare_write().
+	 */
+	if (ref_cmd->data_direction == DMA_TO_DEVICE) {
+		if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
+			pr_err("Received ExpDataSN: 0x%08x for WRITE"
+				" in TMR TASK_REASSIGN greater than command's"
+				" R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
+					ref_cmd->r2t_sn);
+			return -1;
+		}
+		return iscsit_task_reassign_prepare_write(tmr_req, conn);
+	}
+
+	pr_err("Unknown iSCSI data_direction: 0x%02x\n",
+			ref_cmd->data_direction);
+
+	return -1;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
new file mode 100644
index 0000000..142e992
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_TMR_H
+#define ISCSI_TARGET_TMR_H
+
+extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+			unsigned char *);
+extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+			unsigned char *);
+extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
+			struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_TMR_H */
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
new file mode 100644
index 0000000..63e1dcc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -0,0 +1,920 @@
+/*******************************************************************************
+ * This file contains iSCSI Target Portal Group related functions.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+#include <target/iscsi/iscsi_transport.h>
+
+struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
+{
+	struct iscsi_portal_group *tpg;
+
+	tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct iscsi_portal_group\n");
+		return NULL;
+	}
+
+	tpg->tpgt = tpgt;
+	tpg->tpg_state = TPG_STATE_FREE;
+	tpg->tpg_tiqn = tiqn;
+	INIT_LIST_HEAD(&tpg->tpg_gnp_list);
+	INIT_LIST_HEAD(&tpg->tpg_list);
+	mutex_init(&tpg->tpg_access_lock);
+	sema_init(&tpg->np_login_sem, 1);
+	spin_lock_init(&tpg->tpg_state_lock);
+	spin_lock_init(&tpg->tpg_np_lock);
+
+	return tpg;
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
+
+int iscsit_load_discovery_tpg(void)
+{
+	struct iscsi_param *param;
+	struct iscsi_portal_group *tpg;
+	int ret;
+
+	tpg = iscsit_alloc_portal_group(NULL, 1);
+	if (!tpg) {
+		pr_err("Unable to allocate struct iscsi_portal_group\n");
+		return -1;
+	}
+	/*
+	 * Save iscsi_ops pointer for special case discovery TPG that
+	 * doesn't exist as se_wwn->wwn_group within configfs.
+	 */
+	tpg->tpg_se_tpg.se_tpg_tfo = &iscsi_ops;
+	ret = core_tpg_register(NULL, &tpg->tpg_se_tpg, -1);
+	if (ret < 0) {
+		kfree(tpg);
+		return -1;
+	}
+
+	tpg->sid = 1; /* First Assigned LIO Session ID */
+	iscsit_set_default_tpg_attribs(tpg);
+
+	if (iscsi_create_default_params(&tpg->param_list) < 0)
+		goto out;
+	/*
+	 * By default we disable authentication for discovery sessions,
+	 * this can be changed with:
+	 *
+	 * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
+	 */
+	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+	if (!param)
+		goto out;
+
+	if (iscsi_update_param_value(param, "CHAP,None") < 0)
+		goto out;
+
+	tpg->tpg_attrib.authentication = 0;
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state  = TPG_STATE_ACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	iscsit_global->discovery_tpg = tpg;
+	pr_debug("CORE[0] - Allocated Discovery TPG\n");
+
+	return 0;
+out:
+	if (tpg->sid == 1)
+		core_tpg_deregister(&tpg->tpg_se_tpg);
+	kfree(tpg);
+	return -1;
+}
+
+void iscsit_release_discovery_tpg(void)
+{
+	struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
+
+	if (!tpg)
+		return;
+
+	core_tpg_deregister(&tpg->tpg_se_tpg);
+
+	kfree(tpg);
+	iscsit_global->discovery_tpg = NULL;
+}
+
+struct iscsi_portal_group *iscsit_get_tpg_from_np(
+	struct iscsi_tiqn *tiqn,
+	struct iscsi_np *np,
+	struct iscsi_tpg_np **tpg_np_out)
+{
+	struct iscsi_portal_group *tpg = NULL;
+	struct iscsi_tpg_np *tpg_np;
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+		spin_lock(&tpg->tpg_state_lock);
+		if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+			spin_unlock(&tpg->tpg_state_lock);
+			continue;
+		}
+		spin_unlock(&tpg->tpg_state_lock);
+
+		spin_lock(&tpg->tpg_np_lock);
+		list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+			if (tpg_np->tpg_np == np) {
+				*tpg_np_out = tpg_np;
+				kref_get(&tpg_np->tpg_np_kref);
+				spin_unlock(&tpg->tpg_np_lock);
+				spin_unlock(&tiqn->tiqn_tpg_lock);
+				return tpg;
+			}
+		}
+		spin_unlock(&tpg->tpg_np_lock);
+	}
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return NULL;
+}
+
+int iscsit_get_tpg(
+	struct iscsi_portal_group *tpg)
+{
+	return mutex_lock_interruptible(&tpg->tpg_access_lock);
+}
+
+void iscsit_put_tpg(struct iscsi_portal_group *tpg)
+{
+	mutex_unlock(&tpg->tpg_access_lock);
+}
+
+static void iscsit_clear_tpg_np_login_thread(
+	struct iscsi_tpg_np *tpg_np,
+	struct iscsi_portal_group *tpg,
+	bool shutdown)
+{
+	if (!tpg_np->tpg_np) {
+		pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+		return;
+	}
+
+	if (shutdown)
+		tpg_np->tpg_np->enabled = false;
+	iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
+}
+
+static void iscsit_clear_tpg_np_login_threads(
+	struct iscsi_portal_group *tpg,
+	bool shutdown)
+{
+	struct iscsi_tpg_np *tpg_np;
+
+	spin_lock(&tpg->tpg_np_lock);
+	list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+		if (!tpg_np->tpg_np) {
+			pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+			continue;
+		}
+		spin_unlock(&tpg->tpg_np_lock);
+		iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown);
+		spin_lock(&tpg->tpg_np_lock);
+	}
+	spin_unlock(&tpg->tpg_np_lock);
+}
+
+void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
+{
+	iscsi_print_params(tpg->param_list);
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	a->authentication = TA_AUTHENTICATION;
+	a->login_timeout = TA_LOGIN_TIMEOUT;
+	a->netif_timeout = TA_NETIF_TIMEOUT;
+	a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
+	a->generate_node_acls = TA_GENERATE_NODE_ACLS;
+	a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
+	a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
+	a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+	a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
+	a->default_erl = TA_DEFAULT_ERL;
+	a->t10_pi = TA_DEFAULT_T10_PI;
+	a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
+	a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+	a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
+}
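+
+/*
+ * These defaults can later be overridden per TPG through configfs; in the
+ * typical LIO layout each attribute is exposed as a file (path shown for
+ * illustration only):
+ *
+ *	echo 0 > /sys/kernel/config/target/iscsi/<target_iqn>/tpgt_1/attrib/authentication
+ */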
+
+int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
+{
+	if (tpg->tpg_state != TPG_STATE_FREE) {
+		pr_err("Unable to add iSCSI Target Portal Group: %d"
+			" while not in TPG_STATE_FREE state.\n", tpg->tpgt);
+		return -EEXIST;
+	}
+	iscsit_set_default_tpg_attribs(tpg);
+
+	if (iscsi_create_default_params(&tpg->param_list) < 0)
+		goto err_out;
+
+	tpg->tpg_attrib.tpg = tpg;
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state	= TPG_STATE_INACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
+	tiqn->tiqn_ntpgs++;
+	pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
+			tiqn->tiqn, tpg->tpgt);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return 0;
+err_out:
+	if (tpg->param_list) {
+		iscsi_release_param_list(tpg->param_list);
+		tpg->param_list = NULL;
+	}
+	return -ENOMEM;
+}
+
+int iscsit_tpg_del_portal_group(
+	struct iscsi_tiqn *tiqn,
+	struct iscsi_portal_group *tpg,
+	int force)
+{
+	u8 old_state = tpg->tpg_state;
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state = TPG_STATE_INACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+		pr_err("Unable to delete iSCSI Target Portal Group:"
+			" %hu while active sessions exist, and force=0\n",
+			tpg->tpgt);
+		tpg->tpg_state = old_state;
+		return -EPERM;
+	}
+
+	if (tpg->param_list) {
+		iscsi_release_param_list(tpg->param_list);
+		tpg->param_list = NULL;
+	}
+
+	core_tpg_deregister(&tpg->tpg_se_tpg);
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state = TPG_STATE_FREE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	tiqn->tiqn_ntpgs--;
+	list_del(&tpg->tpg_list);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
+			tiqn->tiqn, tpg->tpgt);
+
+	kfree(tpg);
+	return 0;
+}
+
+int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
+{
+	struct iscsi_param *param;
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+	int ret;
+
+	spin_lock(&tpg->tpg_state_lock);
+	if (tpg->tpg_state == TPG_STATE_ACTIVE) {
+		pr_err("iSCSI target portal group: %hu is already"
+			" active, ignoring request.\n", tpg->tpgt);
+		spin_unlock(&tpg->tpg_state_lock);
+		return -EINVAL;
+	}
+	/*
+	 * Make sure that AuthMethod does not contain None as an option
+	 * unless explicitly disabled.  Set the default to CHAP if authentication
+	 * is enforced (the default), and remove the NONE option.
+	 */
+	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+	if (!param) {
+		spin_unlock(&tpg->tpg_state_lock);
+		return -EINVAL;
+	}
+
+	if (tpg->tpg_attrib.authentication) {
+		if (!strcmp(param->value, NONE)) {
+			ret = iscsi_update_param_value(param, CHAP);
+			if (ret)
+				goto err;
+		}
+
+		ret = iscsit_ta_authentication(tpg, 1);
+		if (ret < 0)
+			goto err;
+	}
+
+	tpg->tpg_state = TPG_STATE_ACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	tiqn->tiqn_active_tpgs++;
+	pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
+			tpg->tpgt);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return 0;
+
+err:
+	spin_unlock(&tpg->tpg_state_lock);
+	return ret;
+}
+
+int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
+{
+	struct iscsi_tiqn *tiqn;
+	u8 old_state = tpg->tpg_state;
+
+	spin_lock(&tpg->tpg_state_lock);
+	if (tpg->tpg_state == TPG_STATE_INACTIVE) {
+		pr_err("iSCSI Target Portal Group: %hu is already"
+			" inactive, ignoring request.\n", tpg->tpgt);
+		spin_unlock(&tpg->tpg_state_lock);
+		return -EINVAL;
+	}
+	tpg->tpg_state = TPG_STATE_INACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	iscsit_clear_tpg_np_login_threads(tpg, false);
+
+	if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+		spin_lock(&tpg->tpg_state_lock);
+		tpg->tpg_state = old_state;
+		spin_unlock(&tpg->tpg_state_lock);
+		pr_err("Unable to disable iSCSI Target Portal Group:"
+			" %hu while active sessions exist, and force=0\n",
+			tpg->tpgt);
+		return -EPERM;
+	}
+
+	tiqn = tpg->tpg_tiqn;
+	if (!tiqn || (tpg == iscsit_global->discovery_tpg))
+		return 0;
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	tiqn->tiqn_active_tpgs--;
+	pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
+			tpg->tpgt);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return 0;
+}
+
+struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
+	struct iscsi_session *sess)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+	struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
+					se_node_acl);
+
+	return &acl->node_attrib;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
+	struct iscsi_tpg_np *tpg_np,
+	int network_transport)
+{
+	struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+
+	spin_lock(&tpg_np->tpg_np_parent_lock);
+	list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+			&tpg_np->tpg_np_parent_list, tpg_np_child_list) {
+		if (tpg_np_child->tpg_np->np_network_transport ==
+				network_transport) {
+			spin_unlock(&tpg_np->tpg_np_parent_lock);
+			return tpg_np_child;
+		}
+	}
+	spin_unlock(&tpg_np->tpg_np_parent_lock);
+
+	return NULL;
+}
+
+static bool iscsit_tpg_check_network_portal(
+	struct iscsi_tiqn *tiqn,
+	struct sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np;
+	struct iscsi_np *np;
+	bool match = false;
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+		spin_lock(&tpg->tpg_np_lock);
+		list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+			np = tpg_np->tpg_np;
+
+			match = iscsit_check_np_match(sockaddr, np,
+						network_transport);
+			if (match)
+				break;
+		}
+		spin_unlock(&tpg->tpg_np_lock);
+	}
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return match;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+	struct iscsi_portal_group *tpg,
+	struct sockaddr_storage *sockaddr,
+	struct iscsi_tpg_np *tpg_np_parent,
+	int network_transport)
+{
+	struct iscsi_np *np;
+	struct iscsi_tpg_np *tpg_np;
+
+	if (!tpg_np_parent) {
+		if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
+				network_transport)) {
+			pr_err("Network Portal: %pISc already exists on a"
+				" different TPG on %s\n", sockaddr,
+				tpg->tpg_tiqn->tiqn);
+			return ERR_PTR(-EEXIST);
+		}
+	}
+
+	tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
+	if (!tpg_np) {
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_tpg_np.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	np = iscsit_add_np(sockaddr, network_transport);
+	if (IS_ERR(np)) {
+		kfree(tpg_np);
+		return ERR_CAST(np);
+	}
+
+	INIT_LIST_HEAD(&tpg_np->tpg_np_list);
+	INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
+	INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
+	spin_lock_init(&tpg_np->tpg_np_parent_lock);
+	init_completion(&tpg_np->tpg_np_comp);
+	kref_init(&tpg_np->tpg_np_kref);
+	tpg_np->tpg_np		= np;
+	tpg_np->tpg		= tpg;
+
+	spin_lock(&tpg->tpg_np_lock);
+	list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
+	tpg->num_tpg_nps++;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_num_tpg_nps++;
+	spin_unlock(&tpg->tpg_np_lock);
+
+	if (tpg_np_parent) {
+		tpg_np->tpg_np_parent = tpg_np_parent;
+		spin_lock(&tpg_np_parent->tpg_np_parent_lock);
+		list_add_tail(&tpg_np->tpg_np_child_list,
+			&tpg_np_parent->tpg_np_parent_list);
+		spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
+	}
+
+	pr_debug("CORE[%s] - Added Network Portal: %pISpc,%hu on %s\n",
+		tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
+		np->np_transport->name);
+
+	return tpg_np;
+}
+
+static int iscsit_tpg_release_np(
+	struct iscsi_tpg_np *tpg_np,
+	struct iscsi_portal_group *tpg,
+	struct iscsi_np *np)
+{
+	iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
+
+	pr_debug("CORE[%s] - Removed Network Portal: %pISpc,%hu on %s\n",
+		tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
+		np->np_transport->name);
+
+	tpg_np->tpg_np = NULL;
+	tpg_np->tpg = NULL;
+	kfree(tpg_np);
+	/*
+	 * iscsit_del_np() will shut down the struct iscsi_np when the
+	 * last TPG reference is released.
+	 */
+	return iscsit_del_np(np);
+}
+
+int iscsit_tpg_del_network_portal(
+	struct iscsi_portal_group *tpg,
+	struct iscsi_tpg_np *tpg_np)
+{
+	struct iscsi_np *np;
+	struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+	int ret = 0;
+
+	np = tpg_np->tpg_np;
+	if (!np) {
+		pr_err("Unable to locate struct iscsi_np from"
+				" struct iscsi_tpg_np\n");
+		return -EINVAL;
+	}
+
+	if (!tpg_np->tpg_np_parent) {
+		/*
+		 * We are the parent tpg network portal.  Release all of the
+		 * child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
+		 * list first.
+		 */
+		list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+				&tpg_np->tpg_np_parent_list,
+				tpg_np_child_list) {
+			ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
+			if (ret < 0)
+				pr_err("iscsit_tpg_del_network_portal()"
+					" failed: %d\n", ret);
+		}
+	} else {
+		/*
+		 * We are not the parent ISCSI_TCP tpg network portal.  Release
+		 * our own network portals from the child list.
+		 */
+		spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+		list_del(&tpg_np->tpg_np_child_list);
+		spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+	}
+
+	spin_lock(&tpg->tpg_np_lock);
+	list_del(&tpg_np->tpg_np_list);
+	tpg->num_tpg_nps--;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_num_tpg_nps--;
+	spin_unlock(&tpg->tpg_np_lock);
+
+	return iscsit_tpg_release_np(tpg_np, tpg, np);
+}
+
+int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
+{
+	unsigned char buf1[256], buf2[256], *none = NULL;
+	int len;
+	struct iscsi_param *param;
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((authentication != 1) && (authentication != 0)) {
+		pr_err("Illegal value for authentication parameter:"
+			" %u, ignoring request.\n", authentication);
+		return -EINVAL;
+	}
+
+	memset(buf1, 0, sizeof(buf1));
+	memset(buf2, 0, sizeof(buf2));
+
+	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+	if (!param)
+		return -EINVAL;
+
+	if (authentication) {
+		snprintf(buf1, sizeof(buf1), "%s", param->value);
+		none = strstr(buf1, NONE);
+		if (!none)
+			goto out;
+		if (!strncmp(none + 4, ",", 1)) {
+			if (!strcmp(buf1, none))
+				sprintf(buf2, "%s", none+5);
+			else {
+				none--;
+				*none = '\0';
+				len = sprintf(buf2, "%s", buf1);
+				none += 5;
+				sprintf(buf2 + len, "%s", none);
+			}
+		} else {
+			none--;
+			*none = '\0';
+			sprintf(buf2, "%s", buf1);
+		}
+		if (iscsi_update_param_value(param, buf2) < 0)
+			return -EINVAL;
+	} else {
+		snprintf(buf1, sizeof(buf1), "%s", param->value);
+		none = strstr(buf1, NONE);
+		if (none)
+			goto out;
+		strncat(buf1, ",", strlen(","));
+		strncat(buf1, NONE, strlen(NONE));
+		if (iscsi_update_param_value(param, buf1) < 0)
+			return -EINVAL;
+	}
+
+out:
+	a->authentication = authentication;
+	pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
+		a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
+
+	return 0;
+}
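+
+/*
+ * Worked examples (illustrative) of the AuthMethod string handling above
+ * when enforcing authentication:
+ *
+ *	"CHAP,None"	->  "CHAP"	(None is the trailing option)
+ *	"None,CHAP"	->  "CHAP"	(None leads, keep the remainder)
+ *	"CHAP,None,SRP"	->  "CHAP,SRP"	(None in the middle, splice around it)
+ *
+ * When disabling enforcement, ",None" is appended if not already present.
+ */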
+
+int iscsit_ta_login_timeout(
+	struct iscsi_portal_group *tpg,
+	u32 login_timeout)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
+		pr_err("Requested Login Timeout %u larger than maximum"
+			" %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
+		pr_err("Requested Login Timeout %u smaller than"
+			" minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->login_timeout = login_timeout;
+	pr_debug("Set Login Timeout to %u for Target Portal Group"
+		" %hu\n", a->login_timeout, tpg->tpgt);
+
+	return 0;
+}
+
+int iscsit_ta_netif_timeout(
+	struct iscsi_portal_group *tpg,
+	u32 netif_timeout)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
+		pr_err("Requested Network Interface Timeout %u larger"
+			" than maximum %u\n", netif_timeout,
+				TA_NETIF_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
+		pr_err("Requested Network Interface Timeout %u smaller"
+			" than minimum %u\n", netif_timeout,
+				TA_NETIF_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->netif_timeout = netif_timeout;
+	pr_debug("Set Network Interface Timeout to %u for"
+		" Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
+
+	return 0;
+}
+
+int iscsit_ta_generate_node_acls(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->generate_node_acls = flag;
+	pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+		tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
+	if (flag == 1 && a->cache_dynamic_acls == 0) {
+		pr_debug("Explicitly setting cache_dynamic_acls=1 when "
+			"generate_node_acls=1\n");
+		a->cache_dynamic_acls = 1;
+	}
+
+	return 0;
+}
+
+int iscsit_ta_default_cmdsn_depth(
+	struct iscsi_portal_group *tpg,
+	u32 tcq_depth)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+		pr_err("Requested Default Queue Depth: %u larger"
+			" than maximum %u\n", tcq_depth,
+				TA_DEFAULT_CMDSN_DEPTH_MAX);
+		return -EINVAL;
+	} else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
+		pr_err("Requested Default Queue Depth: %u smaller"
+			" than minimum %u\n", tcq_depth,
+				TA_DEFAULT_CMDSN_DEPTH_MIN);
+		return -EINVAL;
+	}
+
+	a->default_cmdsn_depth = tcq_depth;
+	pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
+		tpg->tpgt, a->default_cmdsn_depth);
+
+	return 0;
+}
+
+int iscsit_ta_cache_dynamic_acls(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (a->generate_node_acls == 1 && flag == 0) {
+		pr_debug("Skipping cache_dynamic_acls=0 when"
+			" generate_node_acls=1\n");
+		return 0;
+	}
+
+	a->cache_dynamic_acls = flag;
+	pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+		" ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+		"Enabled" : "Disabled");
+
+	return 0;
+}
+
+int iscsit_ta_demo_mode_write_protect(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->demo_mode_write_protect = flag;
+	pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
+		tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
+
+	return 0;
+}
+
+int iscsit_ta_prod_mode_write_protect(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->prod_mode_write_protect = flag;
+	pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
+		" %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
+		"ON" : "OFF");
+
+	return 0;
+}
+
+int iscsit_ta_demo_mode_discovery(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->demo_mode_discovery = flag;
+	pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
+		" %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
+		"ON" : "OFF");
+
+	return 0;
+}
+
+int iscsit_ta_default_erl(
+	struct iscsi_portal_group *tpg,
+	u32 default_erl)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
+		pr_err("Illegal value for default_erl: %u\n", default_erl);
+		return -EINVAL;
+	}
+
+	a->default_erl = default_erl;
+	pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
+
+	return 0;
+}
+
+int iscsit_ta_t10_pi(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->t10_pi = flag;
+	pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
+		" %s\n", tpg->tpgt, (a->t10_pi) ?
+		"ON" : "OFF");
+
+	return 0;
+}
+
+int iscsit_ta_fabric_prot_type(
+	struct iscsi_portal_group *tpg,
+	u32 prot_type)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) {
+		pr_err("Illegal value for fabric_prot_type: %u\n", prot_type);
+		return -EINVAL;
+	}
+
+	a->fabric_prot_type = prot_type;
+	pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n",
+		 tpg->tpgt, prot_type);
+
+	return 0;
+}
+
+int iscsit_ta_tpg_enabled_sendtargets(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->tpg_enabled_sendtargets = flag;
+	pr_debug("iSCSI_TPG[%hu] - TPG enabled bit required for SendTargets:"
+		" %s\n", tpg->tpgt, (a->tpg_enabled_sendtargets) ? "ON" : "OFF");
+
+	return 0;
+}
+
+int iscsit_ta_login_keys_workaround(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->login_keys_workaround = flag;
+	pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s\n",
+		tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
+
+	return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
new file mode 100644
index 0000000..901a712
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -0,0 +1,44 @@
+#ifndef ISCSI_TARGET_TPG_H
+#define ISCSI_TARGET_TPG_H
+
+extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
+extern int iscsit_load_discovery_tpg(void);
+extern void iscsit_release_discovery_tpg(void);
+extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
+			struct iscsi_np *, struct iscsi_tpg_np **);
+extern int iscsit_get_tpg(struct iscsi_portal_group *);
+extern void iscsit_put_tpg(struct iscsi_portal_group *);
+extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
+extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
+extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
+			int);
+extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
+extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
+extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
+			struct iscsi_portal_group *, const char *, u32);
+extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
+			struct se_node_acl *);
+extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
+extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
+extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
+extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
+			struct sockaddr_storage *, struct iscsi_tpg_np *,
+			int);
+extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
+			struct iscsi_tpg_np *);
+extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
+
+#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
new file mode 100644
index 0000000..08217d6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -0,0 +1,54 @@
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <target/iscsi/iscsi_transport.h>
+
+static LIST_HEAD(g_transport_list);
+static DEFINE_MUTEX(transport_mutex);
+
+struct iscsit_transport *iscsit_get_transport(int type)
+{
+	struct iscsit_transport *t;
+
+	mutex_lock(&transport_mutex);
+	list_for_each_entry(t, &g_transport_list, t_node) {
+		if (t->transport_type == type) {
+			if (t->owner && !try_module_get(t->owner)) {
+				t = NULL;
+			}
+			mutex_unlock(&transport_mutex);
+			return t;
+		}
+	}
+	mutex_unlock(&transport_mutex);
+
+	return NULL;
+}
+
+void iscsit_put_transport(struct iscsit_transport *t)
+{
+	module_put(t->owner);
+}
+
+int iscsit_register_transport(struct iscsit_transport *t)
+{
+	INIT_LIST_HEAD(&t->t_node);
+
+	mutex_lock(&transport_mutex);
+	list_add_tail(&t->t_node, &g_transport_list);
+	mutex_unlock(&transport_mutex);
+
+	pr_debug("Registered iSCSI transport: %s\n", t->name);
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_register_transport);
+
+void iscsit_unregister_transport(struct iscsit_transport *t)
+{
+	mutex_lock(&transport_mutex);
+	list_del(&t->t_node);
+	mutex_unlock(&transport_mutex);
+
+	pr_debug("Unregistered iSCSI transport: %s\n", t->name);
+}
+EXPORT_SYMBOL(iscsit_unregister_transport);
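+
+/*
+ * A fabric driver would typically pair these calls at module init/exit
+ * time.  A sketch only; the field names follow struct iscsit_transport as
+ * used above, and "my_transport" is hypothetical:
+ *
+ *	static struct iscsit_transport my_transport = {
+ *		.name		= "mytrans",
+ *		.transport_type	= ISCSI_TCP,	/- assumed transport type -/
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	iscsit_register_transport(&my_transport);
+ *	...
+ *	iscsit_unregister_transport(&my_transport);
+ */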
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
new file mode 100644
index 0000000..9359052
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -0,0 +1,1482 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific utility functions.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <linux/percpu_ida.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+#define PRINT_BUFF(buff, len)					\
+{								\
+	int zzz;						\
+								\
+	pr_debug("%d:\n", __LINE__);				\
+	for (zzz = 0; zzz < len; zzz++) {			\
+		if (zzz % 16 == 0) {				\
+			if (zzz)				\
+				pr_debug("\n");			\
+			pr_debug("%4i: ", zzz);			\
+		}						\
+		pr_debug("%02x ", (unsigned char) (buff)[zzz]);	\
+	}							\
+	if ((len + 1) % 16)					\
+		pr_debug("\n");					\
+}
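+
+/*
+ * Debug hexdump helper; a caller would invoke it as, e.g.:
+ *
+ *	PRINT_BUFF(buf, 48);
+ *
+ * printing sixteen bytes per row behind a running offset.
+ */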
+
+extern struct list_head g_tiqn_list;
+extern spinlock_t tiqn_lock;
+
+/*
+ *	Called with cmd->r2t_lock held.
+ */
+int iscsit_add_r2t_to_list(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 xfer_len,
+	int recovery,
+	u32 r2t_sn)
+{
+	struct iscsi_r2t *r2t;
+
+	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
+	if (!r2t) {
+		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&r2t->r2t_list);
+
+	r2t->recovery_r2t = recovery;
+	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
+	r2t->offset = offset;
+	r2t->xfer_len = xfer_len;
+	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
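+	/*
+	 * Drop cmd->r2t_lock across the queueing call below; callers hold
+	 * the lock (see above), so re-acquire it before returning.
+	 */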
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+	spin_lock_bh(&cmd->r2t_lock);
+	return 0;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_for_eos(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 length)
+{
+	struct iscsi_r2t *r2t;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if ((r2t->offset <= offset) &&
+		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return r2t;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	pr_err("Unable to locate R2T for Offset: %u, Length:"
+			" %u\n", offset, length);
+	return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
+{
+	struct iscsi_r2t *r2t;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if (!r2t->sent_r2t) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return r2t;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	pr_err("Unable to locate next R2T to send for ITT:"
+			" 0x%08x.\n", cmd->init_task_tag);
+	return NULL;
+}
+
+/*
+ *	Called with cmd->r2t_lock held.
+ */
+void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
+{
+	list_del(&r2t->r2t_list);
+	kmem_cache_free(lio_r2t_cache, r2t);
+}
+
+void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
+{
+	struct iscsi_r2t *r2t, *r2t_tmp;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
+		iscsit_free_r2t(r2t, cmd);
+	spin_unlock_bh(&cmd->r2t_lock);
+}
+
+/*
+ * May be called from software interrupt (timer) context for allocating
+ * iSCSI NopINs.
+ */
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
+{
+	struct iscsi_cmd *cmd;
+	struct se_session *se_sess = conn->sess->se_sess;
+	int size, tag;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+	if (tag < 0)
+		return NULL;
+
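+	/*
+	 * Each preallocated slot in se_sess->sess_cmd_map holds a struct
+	 * iscsi_cmd plus the transport's private data, indexed by tag.
+	 */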
+	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
+	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
+	memset(cmd, 0, size);
+
+	cmd->se_cmd.map_tag = tag;
+	cmd->conn = conn;
+	INIT_LIST_HEAD(&cmd->i_conn_node);
+	INIT_LIST_HEAD(&cmd->datain_list);
+	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
+	spin_lock_init(&cmd->datain_lock);
+	spin_lock_init(&cmd->dataout_timeout_lock);
+	spin_lock_init(&cmd->istate_lock);
+	spin_lock_init(&cmd->error_lock);
+	spin_lock_init(&cmd->r2t_lock);
+
+	return cmd;
+}
+EXPORT_SYMBOL(iscsit_allocate_cmd);
+
+struct iscsi_seq *iscsit_get_seq_holder_for_datain(
+	struct iscsi_cmd *cmd,
+	u32 seq_send_order)
+{
+	u32 i;
+
+	for (i = 0; i < cmd->seq_count; i++)
+		if (cmd->seq_list[i].seq_send_order == seq_send_order)
+			return &cmd->seq_list[i];
+
+	return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
+{
+	u32 i;
+
+	if (!cmd->seq_list) {
+		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+		return NULL;
+	}
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+			continue;
+		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
+			cmd->seq_send_order++;
+			return &cmd->seq_list[i];
+		}
+	}
+
+	return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
+	struct iscsi_cmd *cmd,
+	u32 r2t_sn)
+{
+	struct iscsi_r2t *r2t;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if (r2t->r2t_sn == r2t_sn) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return r2t;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return NULL;
+}
+
+static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
+{
+	u32 max_cmdsn;
+	int ret;
+
+	/*
+	 * This is the proper method of checking received CmdSN against
+	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
+	 * of order CmdSNs due to multiple connection sessions and/or
+	 * CRC failures.
+	 */
+	max_cmdsn = atomic_read(&sess->max_cmd_sn);
+	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
+		pr_err("Received CmdSN: 0x%08x is greater than"
+		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
+		ret = CMDSN_MAXCMDSN_OVERRUN;
+
+	} else if (cmdsn == sess->exp_cmd_sn) {
+		sess->exp_cmd_sn++;
+		pr_debug("Received CmdSN matches ExpCmdSN,"
+		      " incremented ExpCmdSN to: 0x%08x\n",
+		      sess->exp_cmd_sn);
+		ret = CMDSN_NORMAL_OPERATION;
+
+	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
+		pr_debug("Received CmdSN: 0x%08x is greater"
+		      " than ExpCmdSN: 0x%08x, not acknowledging.\n",
+		      cmdsn, sess->exp_cmd_sn);
+		ret = CMDSN_HIGHER_THAN_EXP;
+
+	} else {
+		pr_err("Received CmdSN: 0x%08x is less than"
+		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
+		       sess->exp_cmd_sn);
+		ret = CMDSN_LOWER_THAN_EXP;
+	}
+
+	return ret;
+}
+
+/*
+ * Commands may be received out of order if MC/S is in use.
+ * Ensure they are executed in CmdSN order.
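+ *
+ * Returns a CMDSN_* code; if command execution or out-of-order handling
+ * fails, a REJECT is queued and CMDSN_ERROR_CANNOT_RECOVER is returned.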
+ */
+int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			unsigned char *buf, __be32 cmdsn)
+{
+	int ret, cmdsn_ret;
+	bool reject = false;
+	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
+
+	mutex_lock(&conn->sess->cmdsn_mutex);
+
+	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
+	switch (cmdsn_ret) {
+	case CMDSN_NORMAL_OPERATION:
+		ret = iscsit_execute_cmd(cmd, 0);
+		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
+			iscsit_execute_ooo_cmdsns(conn->sess);
+		else if (ret < 0) {
+			reject = true;
+			ret = CMDSN_ERROR_CANNOT_RECOVER;
+		}
+		break;
+	case CMDSN_HIGHER_THAN_EXP:
+		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
+		if (ret < 0) {
+			reject = true;
+			ret = CMDSN_ERROR_CANNOT_RECOVER;
+			break;
+		}
+		ret = CMDSN_HIGHER_THAN_EXP;
+		break;
+	case CMDSN_LOWER_THAN_EXP:
+	case CMDSN_MAXCMDSN_OVERRUN:
+	default:
+		cmd->i_state = ISTATE_REMOVE;
+		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+		/*
+		 * Existing callers for iscsit_sequence_cmd() will silently
+		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
+		 * return for CMDSN_MAXCMDSN_OVERRUN as well.
+		 */
+		ret = CMDSN_LOWER_THAN_EXP;
+		break;
+	}
+	mutex_unlock(&conn->sess->cmdsn_mutex);
+
+	if (reject)
+		iscsit_reject_cmd(cmd, reason, buf);
+
+	return ret;
+}
+EXPORT_SYMBOL(iscsit_sequence_cmd);
+
+int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if (conn->sess->sess_ops->InitialR2T) {
+		pr_err("Received unexpected unsolicited data"
+			" while InitialR2T=Yes, protocol error.\n");
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+		return -1;
+	}
+
+	if ((cmd->first_burst_len + payload_length) >
+	     conn->sess->sess_ops->FirstBurstLength) {
+		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+			" for this Unsolicited DataOut Burst.\n",
+			(cmd->first_burst_len + payload_length),
+				conn->sess->sess_ops->FirstBurstLength);
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+		return -1;
+	}
+
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
+		return 0;
+
+	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
+	    ((cmd->first_burst_len + payload_length) !=
+	      conn->sess->sess_ops->FirstBurstLength)) {
+		pr_err("Unsolicited non-immediate data received %u"
+			" does not equal FirstBurstLength: %u, and does"
+			" not equal ExpXferLen %u.\n",
+			(cmd->first_burst_len + payload_length),
+			conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+		return -1;
+	}
+	return 0;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt(
+	struct iscsi_conn *conn,
+	itt_t init_task_tag)
+{
+	struct iscsi_cmd *cmd;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+		if (cmd->init_task_tag == init_task_tag) {
+			spin_unlock_bh(&conn->cmd_lock);
+			return cmd;
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_err("Unable to locate ITT: 0x%08x on CID: %hu\n",
+			init_task_tag, conn->cid);
+	return NULL;
+}
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
+	struct iscsi_conn *conn,
+	itt_t init_task_tag,
+	u32 length)
+{
+	struct iscsi_cmd *cmd;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
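+		/* Skip commands whose final DataOut has already arrived. */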
+		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
+			continue;
+		if (cmd->init_task_tag == init_task_tag) {
+			spin_unlock_bh(&conn->cmd_lock);
+			return cmd;
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
+			" dumping payload\n", init_task_tag, conn->cid);
+	if (length)
+		iscsit_dump_data_payload(conn, length, 1);
+
+	return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_ttt(
+	struct iscsi_conn *conn,
+	u32 targ_xfer_tag)
+{
+	struct iscsi_cmd *cmd = NULL;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+		if (cmd->targ_xfer_tag == targ_xfer_tag) {
+			spin_unlock_bh(&conn->cmd_lock);
+			return cmd;
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
+			targ_xfer_tag, conn->cid);
+	return NULL;
+}
+
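+/*
+ * Returns -2 if the command is found on the inactive connection recovery
+ * list, 0 if found on the active list (ready for reassignment), and -1
+ * if the ITT cannot be located on either list.
+ */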
+int iscsit_find_cmd_for_recovery(
+	struct iscsi_session *sess,
+	struct iscsi_cmd **cmd_ptr,
+	struct iscsi_conn_recovery **cr_ptr,
+	itt_t init_task_tag)
+{
+	struct iscsi_cmd *cmd = NULL;
+	struct iscsi_conn_recovery *cr;
+	/*
+	 * Scan through the inactive connection recovery list's command list.
+	 * If init_task_tag matches, the command is still outstanding.
+	 */
+	spin_lock(&sess->cr_i_lock);
+	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
+			if (cmd->init_task_tag == init_task_tag) {
+				spin_unlock(&cr->conn_recovery_cmd_lock);
+				spin_unlock(&sess->cr_i_lock);
+
+				*cr_ptr = cr;
+				*cmd_ptr = cmd;
+				return -2;
+			}
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+	}
+	spin_unlock(&sess->cr_i_lock);
+	/*
+	 * Scan through the active connection recovery list's command list.
+	 * If init_task_tag matches, the command is ready to be reassigned.
+	 */
+	spin_lock(&sess->cr_a_lock);
+	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
+			if (cmd->init_task_tag == init_task_tag) {
+				spin_unlock(&cr->conn_recovery_cmd_lock);
+				spin_unlock(&sess->cr_a_lock);
+
+				*cr_ptr = cr;
+				*cmd_ptr = cmd;
+				return 0;
+			}
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+	}
+	spin_unlock(&sess->cr_a_lock);
+
+	return -1;
+}
+
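+/*
+ * Queue @cmd on the connection's immediate queue for the TX thread and
+ * wake up conn->queues_wq.  Uses GFP_ATOMIC so it is safe to call from
+ * softirq (timer) context.
+ */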
+void iscsit_add_cmd_to_immediate_queue(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	u8 state)
+{
+	struct iscsi_queue_req *qr;
+
+	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+	if (!qr) {
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_queue_req\n");
+		return;
+	}
+	INIT_LIST_HEAD(&qr->qr_list);
+	qr->cmd = cmd;
+	qr->state = state;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
+	atomic_inc(&cmd->immed_queue_count);
+	atomic_set(&conn->check_immediate_queue, 1);
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	wake_up(&conn->queues_wq);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	if (list_empty(&conn->immed_queue_list)) {
+		spin_unlock_bh(&conn->immed_queue_lock);
+		return NULL;
+	}
+	qr = list_first_entry(&conn->immed_queue_list,
+			      struct iscsi_queue_req, qr_list);
+
+	list_del(&qr->qr_list);
+	if (qr->cmd)
+		atomic_dec(&qr->cmd->immed_queue_count);
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	return qr;
+}
+
+static void iscsit_remove_cmd_from_immediate_queue(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr, *qr_tmp;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	if (!atomic_read(&cmd->immed_queue_count)) {
+		spin_unlock_bh(&conn->immed_queue_lock);
+		return;
+	}
+
+	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+		if (qr->cmd != cmd)
+			continue;
+
+		atomic_dec(&qr->cmd->immed_queue_count);
+		list_del(&qr->qr_list);
+		kmem_cache_free(lio_qr_cache, qr);
+	}
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	if (atomic_read(&cmd->immed_queue_count)) {
+		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
+			cmd->init_task_tag,
+			atomic_read(&cmd->immed_queue_count));
+	}
+}
+
+void iscsit_add_cmd_to_response_queue(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	u8 state)
+{
+	struct iscsi_queue_req *qr;
+
+	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+	if (!qr) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_queue_req\n");
+		return;
+	}
+	INIT_LIST_HEAD(&qr->qr_list);
+	qr->cmd = cmd;
+	qr->state = state;
+
+	spin_lock_bh(&conn->response_queue_lock);
+	list_add_tail(&qr->qr_list, &conn->response_queue_list);
+	atomic_inc(&cmd->response_queue_count);
+	spin_unlock_bh(&conn->response_queue_lock);
+
+	wake_up(&conn->queues_wq);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr;
+
+	spin_lock_bh(&conn->response_queue_lock);
+	if (list_empty(&conn->response_queue_list)) {
+		spin_unlock_bh(&conn->response_queue_lock);
+		return NULL;
+	}
+
+	qr = list_first_entry(&conn->response_queue_list,
+			      struct iscsi_queue_req, qr_list);
+
+	list_del(&qr->qr_list);
+	if (qr->cmd)
+		atomic_dec(&qr->cmd->response_queue_count);
+	spin_unlock_bh(&conn->response_queue_lock);
+
+	return qr;
+}
+
+static void iscsit_remove_cmd_from_response_queue(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr, *qr_tmp;
+
+	spin_lock_bh(&conn->response_queue_lock);
+	if (!atomic_read(&cmd->response_queue_count)) {
+		spin_unlock_bh(&conn->response_queue_lock);
+		return;
+	}
+
+	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+				qr_list) {
+		if (qr->cmd != cmd)
+			continue;
+
+		atomic_dec(&qr->cmd->response_queue_count);
+		list_del(&qr->qr_list);
+		kmem_cache_free(lio_qr_cache, qr);
+	}
+	spin_unlock_bh(&conn->response_queue_lock);
+
+	if (atomic_read(&cmd->response_queue_count)) {
+		pr_err("ITT: 0x%08x response_queue_count: %d\n",
+			cmd->init_task_tag,
+			atomic_read(&cmd->response_queue_count));
+	}
+}
+
+bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
+{
+	bool empty;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	empty = list_empty(&conn->immed_queue_list);
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	if (!empty)
+		return empty;
+
+	spin_lock_bh(&conn->response_queue_lock);
+	empty = list_empty(&conn->response_queue_list);
+	spin_unlock_bh(&conn->response_queue_lock);
+
+	return empty;
+}
+
+void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr, *qr_tmp;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+		list_del(&qr->qr_list);
+		if (qr->cmd)
+			atomic_dec(&qr->cmd->immed_queue_count);
+
+		kmem_cache_free(lio_qr_cache, qr);
+	}
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	spin_lock_bh(&conn->response_queue_lock);
+	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+			qr_list) {
+		list_del(&qr->qr_list);
+		if (qr->cmd)
+			atomic_dec(&qr->cmd->response_queue_count);
+
+		kmem_cache_free(lio_qr_cache, qr);
+	}
+	spin_unlock_bh(&conn->response_queue_lock);
+}
+
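+/*
+ * Free per-command allocations and return the command's tag to the
+ * session's percpu_ida tag pool; the iscsi_cmd itself lives in the
+ * preallocated sess_cmd_map and is not kfree()d here.
+ */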
+void iscsit_release_cmd(struct iscsi_cmd *cmd)
+{
+	struct iscsi_session *sess;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (cmd->conn)
+		sess = cmd->conn->sess;
+	else
+		sess = cmd->sess;
+
+	BUG_ON(!sess || !sess->se_sess);
+
+	kfree(cmd->buf_ptr);
+	kfree(cmd->pdu_list);
+	kfree(cmd->seq_list);
+	kfree(cmd->tmr_req);
+	kfree(cmd->iov_data);
+	kfree(cmd->text_in_ptr);
+
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
+}
+EXPORT_SYMBOL(iscsit_release_cmd);
+
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+		       bool check_queues)
+{
+	struct iscsi_conn *conn = cmd->conn;
+
+	if (scsi_cmd) {
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			iscsit_stop_dataout_timer(cmd);
+			iscsit_free_r2ts_from_list(cmd);
+		}
+		if (cmd->data_direction == DMA_FROM_DEVICE)
+			iscsit_free_all_datain_reqs(cmd);
+	}
+
+	if (conn && check_queues) {
+		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
+		iscsit_remove_cmd_from_response_queue(cmd, conn);
+	}
+}
+
+void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+{
+	struct se_cmd *se_cmd = NULL;
+	int rc;
+	bool op_scsi = false;
+	/*
+	 * Determine if a struct se_cmd is associated with
+	 * this struct iscsi_cmd.
+	 */
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_SCSI_CMD:
+		op_scsi = true;
+		/*
+		 * Fallthrough
+		 */
+	case ISCSI_OP_SCSI_TMFUNC:
+		se_cmd = &cmd->se_cmd;
+		__iscsit_free_cmd(cmd, op_scsi, shutdown);
+		rc = transport_generic_free_cmd(se_cmd, shutdown);
+		if (!rc && shutdown && se_cmd->se_sess) {
+			__iscsit_free_cmd(cmd, op_scsi, shutdown);
+			target_put_sess_cmd(se_cmd);
+		}
+		break;
+	case ISCSI_OP_REJECT:
+		/*
+		 * Handle special case for REJECT when iscsi_add_reject*() has
+		 * overwritten the original iscsi_opcode assignment, and the
+		 * associated cmd->se_cmd needs to be released.
+		 */
+		if (cmd->se_cmd.se_tfo != NULL) {
+			se_cmd = &cmd->se_cmd;
+			__iscsit_free_cmd(cmd, true, shutdown);
+
+			rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+			if (!rc && shutdown && se_cmd->se_sess) {
+				__iscsit_free_cmd(cmd, true, shutdown);
+				target_put_sess_cmd(se_cmd);
+			}
+			break;
+		}
+		/* Fall-through */
+	default:
+		__iscsit_free_cmd(cmd, false, shutdown);
+		iscsit_release_cmd(cmd);
+		break;
+	}
+}
+
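+/*
+ * Returns 0 if the session usage count is already zero, 1 after sleeping
+ * until it drops to zero, or 2 when called from interrupt context where
+ * sleeping is not possible.
+ */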
+int iscsit_check_session_usage_count(struct iscsi_session *sess)
+{
+	spin_lock_bh(&sess->session_usage_lock);
+	if (sess->session_usage_count != 0) {
+		sess->session_waiting_on_uc = 1;
+		spin_unlock_bh(&sess->session_usage_lock);
+		if (in_interrupt())
+			return 2;
+
+		wait_for_completion(&sess->session_waiting_on_uc_comp);
+		return 1;
+	}
+	spin_unlock_bh(&sess->session_usage_lock);
+
+	return 0;
+}
+
+void iscsit_dec_session_usage_count(struct iscsi_session *sess)
+{
+	spin_lock_bh(&sess->session_usage_lock);
+	sess->session_usage_count--;
+
+	if (!sess->session_usage_count && sess->session_waiting_on_uc)
+		complete(&sess->session_waiting_on_uc_comp);
+
+	spin_unlock_bh(&sess->session_usage_lock);
+}
+
+void iscsit_inc_session_usage_count(struct iscsi_session *sess)
+{
+	spin_lock_bh(&sess->session_usage_lock);
+	sess->session_usage_count++;
+	spin_unlock_bh(&sess->session_usage_lock);
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
+{
+	struct iscsi_conn *conn;
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+		if ((conn->cid == cid) &&
+		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
+			iscsit_inc_conn_usage_count(conn);
+			spin_unlock_bh(&sess->conn_lock);
+			return conn;
+		}
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	return NULL;
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
+{
+	struct iscsi_conn *conn;
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+		if (conn->cid == cid) {
+			iscsit_inc_conn_usage_count(conn);
+			spin_lock(&conn->state_lock);
+			atomic_set(&conn->connection_wait_rcfr, 1);
+			spin_unlock(&conn->state_lock);
+			spin_unlock_bh(&sess->conn_lock);
+			return conn;
+		}
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	return NULL;
+}
+
+void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->conn_usage_lock);
+	if (conn->conn_usage_count != 0) {
+		conn->conn_waiting_on_uc = 1;
+		spin_unlock_bh(&conn->conn_usage_lock);
+
+		wait_for_completion(&conn->conn_waiting_on_uc_comp);
+		return;
+	}
+	spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->conn_usage_lock);
+	conn->conn_usage_count--;
+
+	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
+		complete(&conn->conn_waiting_on_uc_comp);
+
+	spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->conn_usage_lock);
+	conn->conn_usage_count++;
+	spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
+{
+	u8 state;
+	struct iscsi_cmd *cmd;
+
+	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
+	if (!cmd)
+		return -1;
+
+	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
+	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
+				ISTATE_SEND_NOPIN_NO_RESPONSE;
+	cmd->init_task_tag = RESERVED_ITT;
+	cmd->targ_xfer_tag = (want_response) ?
+			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (want_response)
+		iscsit_start_nopin_response_timer(conn);
+	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
+
+	return 0;
+}
+
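+/*
+ * Timer callback: the initiator did not answer a NOPIN within
+ * nopin_response_timeout seconds, so record the session error and force
+ * connection reinstatement.
+ */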
+static void iscsit_handle_nopin_response_timeout(unsigned long data)
+{
+	struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+	iscsit_inc_conn_usage_count(conn);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		iscsit_dec_conn_usage_count(conn);
+		return;
+	}
+
+	pr_debug("Did not receive response to NOPIN on CID: %hu on"
+		" SID: %u, failing connection.\n", conn->cid,
+			conn->sess->sid);
+	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+
+	{
+		struct iscsi_portal_group *tpg = conn->sess->tpg;
+		struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+		if (tiqn) {
+			spin_lock_bh(&tiqn->sess_err_stats.lock);
+			strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+				conn->sess->sess_ops->InitiatorName);
+			tiqn->sess_err_stats.last_sess_failure_type =
+				ISCSI_SESS_ERR_CXN_TIMEOUT;
+			tiqn->sess_err_stats.cxn_timeout_errors++;
+			atomic_long_inc(&conn->sess->conn_timeout_errors);
+			spin_unlock_bh(&tiqn->sess_err_stats.lock);
+		}
+	}
+
+	iscsit_cause_connection_reinstatement(conn, 0);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+
+	mod_timer(&conn->nopin_response_timer,
+		(get_jiffies_64() + na->nopin_response_timeout * HZ));
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+/*
+ *	Takes conn->nopin_timer_lock internally, so must be called
+ *	without it held.
+ */
+void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+
+	init_timer(&conn->nopin_response_timer);
+	conn->nopin_response_timer.expires =
+		(get_jiffies_64() + na->nopin_response_timeout * HZ);
+	conn->nopin_response_timer.data = (unsigned long)conn;
+	conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
+	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
+	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&conn->nopin_response_timer);
+
+	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
+		" seconds\n", conn->cid, na->nopin_response_timeout);
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+
+	del_timer_sync(&conn->nopin_response_timer);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
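+/*
+ * Timer callback: the NOPIN keepalive interval expired; queue a new
+ * NopIN that requests a response, which in turn arms the NOPIN
+ * response timer.
+ */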
+static void iscsit_handle_nopin_timeout(unsigned long data)
+{
+	struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+	iscsit_inc_conn_usage_count(conn);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		iscsit_dec_conn_usage_count(conn);
+		return;
+	}
+	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+
+	iscsit_add_nopin(conn, 1);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+/*
+ * Called with conn->nopin_timer_lock held.
+ */
+void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+	/*
+	 * NOPIN timeout is disabled.
+	 */
+	if (!na->nopin_timeout)
+		return;
+
+	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
+		return;
+
+	init_timer(&conn->nopin_timer);
+	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+	conn->nopin_timer.data = (unsigned long)conn;
+	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&conn->nopin_timer);
+
+	pr_debug("Started NOPIN Timer on CID: %d at %u second"
+		" interval\n", conn->cid, na->nopin_timeout);
+}
+
+void iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+	/*
+	 * NOPIN timeout is disabled.
+	 */
+	if (!na->nopin_timeout)
+		return;
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+
+	init_timer(&conn->nopin_timer);
+	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+	conn->nopin_timer.data = (unsigned long)conn;
+	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&conn->nopin_timer);
+
+	pr_debug("Started NOPIN Timer on CID: %d at %u second"
+			" interval\n", conn->cid, na->nopin_timeout);
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+	conn->nopin_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+
+	del_timer_sync(&conn->nopin_timer);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+int iscsit_send_tx_data(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	int use_misc)
+{
+	int tx_sent, tx_size;
+	u32 iov_count;
+	struct kvec *iov;
+
+send_data:
+	tx_size = cmd->tx_size;
+
+	if (!use_misc) {
+		iov = &cmd->iov_data[0];
+		iov_count = cmd->iov_data_count;
+	} else {
+		iov = &cmd->iov_misc[0];
+		iov_count = cmd->iov_misc_count;
+	}
+
+	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
+	if (tx_size != tx_sent) {
+		if (tx_sent == -EAGAIN) {
+			pr_err("tx_data() returned -EAGAIN\n");
+			goto send_data;
+		}
+		return -1;
+	}
+	cmd->tx_size = 0;
+
+	return 0;
+}
+
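+/*
+ * Transmit a PDU using zero-copy sendpage() for the payload: the header
+ * (plus optional header digest) goes out via tx_data(), each scatterlist
+ * page via sock->ops->sendpage(), followed by any padding bytes and the
+ * optional data digest taken from cmd->iov_data[].
+ */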
+int iscsit_fe_sendpage_sg(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct scatterlist *sg = cmd->first_data_sg;
+	struct kvec iov;
+	u32 tx_hdr_size, data_len;
+	u32 offset = cmd->first_data_sg_off;
+	int tx_sent, iov_off;
+
+send_hdr:
+	tx_hdr_size = ISCSI_HDR_LEN;
+	if (conn->conn_ops->HeaderDigest)
+		tx_hdr_size += ISCSI_CRC_LEN;
+
+	iov.iov_base = cmd->pdu;
+	iov.iov_len = tx_hdr_size;
+
+	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
+	if (tx_hdr_size != tx_sent) {
+		if (tx_sent == -EAGAIN) {
+			pr_err("tx_data() returned -EAGAIN\n");
+			goto send_hdr;
+		}
+		return -1;
+	}
+
+	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
+	/*
+	 * Set iov_off used by padding and data digest tx_data() calls below
+	 * in order to determine proper offset into cmd->iov_data[]
+	 */
+	if (conn->conn_ops->DataDigest) {
+		data_len -= ISCSI_CRC_LEN;
+		if (cmd->padding)
+			iov_off = (cmd->iov_data_count - 2);
+		else
+			iov_off = (cmd->iov_data_count - 1);
+	} else {
+		iov_off = (cmd->iov_data_count - 1);
+	}
+	/*
+	 * Perform sendpage() for each page in the scatterlist
+	 */
+	while (data_len) {
+		u32 space = (sg->length - offset);
+		u32 sub_len = min_t(u32, data_len, space);
+send_pg:
+		tx_sent = conn->sock->ops->sendpage(conn->sock,
+					sg_page(sg), sg->offset + offset, sub_len, 0);
+		if (tx_sent != sub_len) {
+			if (tx_sent == -EAGAIN) {
+				pr_err("tcp_sendpage() returned"
+						" -EAGAIN\n");
+				goto send_pg;
+			}
+
+			pr_err("tcp_sendpage() failure: %d\n",
+					tx_sent);
+			return -1;
+		}
+
+		data_len -= sub_len;
+		offset = 0;
+		sg = sg_next(sg);
+	}
+
+send_padding:
+	if (cmd->padding) {
+		struct kvec *iov_p = &cmd->iov_data[iov_off++];
+
+		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
+		if (cmd->padding != tx_sent) {
+			if (tx_sent == -EAGAIN) {
+				pr_err("tx_data() returned -EAGAIN\n");
+				goto send_padding;
+			}
+			return -1;
+		}
+	}
+
+send_datacrc:
+	if (conn->conn_ops->DataDigest) {
+		struct kvec *iov_d = &cmd->iov_data[iov_off];
+
+		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
+		if (ISCSI_CRC_LEN != tx_sent) {
+			if (tx_sent == -EAGAIN) {
+				pr_err("tx_data() returned -EAGAIN\n");
+				goto send_datacrc;
+			}
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ *      This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
+ *      back to the Initiator when an exception condition occurs with the
+ *      errors set in status_class and status_detail.
+ *
+ *      Parameters:     iSCSI Connection, Status Class, Status Detail.
+ *      Returns:        0 on success, -1 on error.
+ */
+int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
+{
+	struct iscsi_login_rsp *hdr;
+	struct iscsi_login *login = conn->conn_login;
+
+	login->login_failed = 1;
+	iscsit_collect_login_stats(conn, status_class, status_detail);
+
+	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
+
+	hdr	= (struct iscsi_login_rsp *)&login->rsp[0];
+	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
+	hdr->status_class	= status_class;
+	hdr->status_detail	= status_detail;
+	hdr->itt		= conn->login_itt;
+
+	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
+}
+
+void iscsit_print_session_params(struct iscsi_session *sess)
+{
+	struct iscsi_conn *conn;
+
+	pr_debug("-----------------------------[Session Params for"
+		" SID: %u]-----------------------------\n", sess->sid);
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+		iscsi_dump_conn_ops(conn->conn_ops);
+	spin_unlock_bh(&sess->conn_lock);
+
+	iscsi_dump_sess_ops(sess->sess_ops);
+}
+
+static int iscsit_do_rx_data(
+	struct iscsi_conn *conn,
+	struct iscsi_data_count *count)
+{
+	int data = count->data_length, rx_loop = 0, total_rx = 0;
+	struct msghdr msg;
+
+	if (!conn || !conn->sock || !conn->conn_ops)
+		return -1;
+
+	memset(&msg, 0, sizeof(struct msghdr));
+	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
+		      count->iov, count->iov_count, data);
+
+	while (total_rx < data) {
+		rx_loop = sock_recvmsg(conn->sock, &msg,
+				      (data - total_rx), MSG_WAITALL);
+		if (rx_loop <= 0) {
+			pr_debug("rx_loop: %d total_rx: %d\n",
+				rx_loop, total_rx);
+			return rx_loop;
+		}
+		total_rx += rx_loop;
+		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
+				rx_loop, total_rx, data);
+	}
+
+	return total_rx;
+}
+
+static int iscsit_do_tx_data(
+	struct iscsi_conn *conn,
+	struct iscsi_data_count *count)
+{
+	int ret, iov_len;
+	struct kvec *iov_p;
+	struct msghdr msg;
+
+	if (!conn || !conn->sock || !conn->conn_ops)
+		return -1;
+
+	if (count->data_length <= 0) {
+		pr_err("Data length is: %d\n", count->data_length);
+		return -1;
+	}
+
+	memset(&msg, 0, sizeof(struct msghdr));
+
+	iov_p = count->iov;
+	iov_len = count->iov_count;
+
+	ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+			     count->data_length);
+	if (ret != count->data_length) {
+		pr_err("Unexpected ret: %d send data %d\n",
+		       ret, count->data_length);
+		return -EPIPE;
+	}
+	pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
+
+	return ret;
+}
+
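+/*
+ * Thin wrappers that package a kvec array into a struct iscsi_data_count
+ * before handing off to iscsit_do_rx_data()/iscsit_do_tx_data().
+ */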
+int rx_data(
+	struct iscsi_conn *conn,
+	struct kvec *iov,
+	int iov_count,
+	int data)
+{
+	struct iscsi_data_count c;
+
+	if (!conn || !conn->sock || !conn->conn_ops)
+		return -1;
+
+	memset(&c, 0, sizeof(struct iscsi_data_count));
+	c.iov = iov;
+	c.iov_count = iov_count;
+	c.data_length = data;
+	c.type = ISCSI_RX_DATA;
+
+	return iscsit_do_rx_data(conn, &c);
+}
+
+int tx_data(
+	struct iscsi_conn *conn,
+	struct kvec *iov,
+	int iov_count,
+	int data)
+{
+	struct iscsi_data_count c;
+
+	if (!conn || !conn->sock || !conn->conn_ops)
+		return -1;
+
+	memset(&c, 0, sizeof(struct iscsi_data_count));
+	c.iov = iov;
+	c.iov_count = iov_count;
+	c.data_length = data;
+	c.type = ISCSI_TX_DATA;
+
+	return iscsit_do_tx_data(conn, &c);
+}
+
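+/*
+ * Compare two sockaddr_storage values: both the address and the port
+ * must match.  Only AF_INET and AF_INET6 are supported; any other
+ * address family compares as not equal.
+ */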
+static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y)
+{
+	switch (x->ss_family) {
+	case AF_INET: {
+		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
+		struct sockaddr_in *siny = (struct sockaddr_in *)y;
+		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
+			return false;
+		if (sinx->sin_port != siny->sin_port)
+			return false;
+		break;
+	}
+	case AF_INET6: {
+		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
+		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
+		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
+			return false;
+		if (sinx->sin6_port != siny->sin6_port)
+			return false;
+		break;
+	}
+	default:
+		return false;
+	}
+	return true;
+}
+
+void iscsit_collect_login_stats(
+	struct iscsi_conn *conn,
+	u8 status_class,
+	u8 status_detail)
+{
+	struct iscsi_param *intrname = NULL;
+	struct iscsi_tiqn *tiqn;
+	struct iscsi_login_stats *ls;
+
+	tiqn = iscsit_snmp_get_tiqn(conn);
+	if (!tiqn)
+		return;
+
+	ls = &tiqn->login_stats;
+
+	spin_lock(&ls->lock);
+	if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) &&
+	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
+		/* We already have the failure info for this login */
+		spin_unlock(&ls->lock);
+		return;
+	}
+
+	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
+		ls->accepts++;
+	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
+		ls->redirects++;
+		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
+	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
+		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
+		ls->authenticate_fails++;
+		ls->last_fail_type =  ISCSI_LOGIN_FAIL_AUTHENTICATE;
+	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
+		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
+		ls->authorize_fails++;
+		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
+	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
+		ls->negotiate_fails++;
+		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
+	} else {
+		ls->other_fails++;
+		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
+	}
+
+	/* Save initiator name, IP address and time, if it is a failed login */
+	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
+		if (conn->param_list)
+			intrname = iscsi_find_param_from_key(INITIATORNAME,
+							     conn->param_list);
+		strlcpy(ls->last_intr_fail_name,
+		       (intrname ? intrname->value : "Unknown"),
+		       sizeof(ls->last_intr_fail_name));
+
+		ls->last_intr_fail_ip_family = conn->login_family;
+
+		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
+		ls->last_fail_time = get_jiffies_64();
+	}
+
+	spin_unlock(&ls->lock);
+}
+
+struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
+{
+	struct iscsi_portal_group *tpg;
+
+	if (!conn || !conn->sess)
+		return NULL;
+
+	tpg = conn->sess->tpg;
+	if (!tpg)
+		return NULL;
+
+	if (!tpg->tpg_tiqn)
+		return NULL;
+
+	return tpg->tpg_tiqn;
+}
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
new file mode 100644
index 0000000..995f1cb
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -0,0 +1,60 @@
+#ifndef ISCSI_TARGET_UTIL_H
+#define ISCSI_TARGET_UTIL_H
+
+#define MARKER_SIZE	8
+
+extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
+extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
+extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
+extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
+extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
+extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			       unsigned char *, __be32 cmdsn);
+extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
+			itt_t, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
+extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
+			struct iscsi_conn_recovery **, itt_t);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
+extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
+extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
+extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
+extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
+extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
+extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
+extern int iscsit_check_session_usage_count(struct iscsi_session *);
+extern void iscsit_dec_session_usage_count(struct iscsi_session *);
+extern void iscsit_inc_session_usage_count(struct iscsi_session *);
+extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
+extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
+extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
+extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
+extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
+extern void iscsit_print_session_params(struct iscsi_session *);
+extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
+extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
+extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
new file mode 100644
index 0000000..abe8ecb
--- /dev/null
+++ b/drivers/target/loopback/Kconfig
@@ -0,0 +1,5 @@
+config LOOPBACK_TARGET
+	tristate "TCM Virtual SAS target and Linux/SCSI LLD fabric loopback module"
+	help
+	  Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
+	  fabric loopback module.
diff --git a/drivers/target/loopback/Makefile b/drivers/target/loopback/Makefile
new file mode 100644
index 0000000..6abebdf
--- /dev/null
+++ b/drivers/target/loopback/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LOOPBACK_TARGET)	+= tcm_loop.o
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
new file mode 100644
index 0000000..4fb0eca
--- /dev/null
+++ b/drivers/target/loopback/tcm_loop.c
@@ -0,0 +1,1314 @@
+/*******************************************************************************
+ *
+ * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
+ * for emulated SAS initiator ports
+ *
+ * © Copyright 2011-2013 Datera, Inc.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "tcm_loop.h"
+
+#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
+
+static struct workqueue_struct *tcm_loop_workqueue;
+static struct kmem_cache *tcm_loop_cmd_cache;
+
+static int tcm_loop_hba_no_cnt;
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd);
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free()
+ */
+static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
+{
+	/*
+	 * Do not release struct se_cmd's containing a valid TMR
+	 * pointer.  These will be released directly in tcm_loop_device_reset()
+	 * with transport_generic_free_cmd().
+	 */
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+		return 0;
+	/*
+	 * Release the struct se_cmd, which will make a callback to release
+	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
+	 */
+	transport_generic_free_cmd(se_cmd, 0);
+	return 1;
+}
+
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+
+	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+}
+
+static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+	seq_printf(m, "tcm_loop_proc_info()\n");
+	return 0;
+}
+
+static int tcm_loop_driver_probe(struct device *);
+static int tcm_loop_driver_remove(struct device *);
+
+static int pseudo_lld_bus_match(struct device *dev,
+				struct device_driver *dev_driver)
+{
+	return 1;
+}
+
+static struct bus_type tcm_loop_lld_bus = {
+	.name			= "tcm_loop_bus",
+	.match			= pseudo_lld_bus_match,
+	.probe			= tcm_loop_driver_probe,
+	.remove			= tcm_loop_driver_remove,
+};
+
+static struct device_driver tcm_loop_driverfs = {
+	.name			= "tcm_loop",
+	.bus			= &tcm_loop_lld_bus,
+};
+/*
+ * Used with root_device_register() in tcm_loop_alloc_core_bus() below
+ */
+static struct device *tcm_loop_primary;
+
+static void tcm_loop_submission_work(struct work_struct *work)
+{
+	struct tcm_loop_cmd *tl_cmd =
+		container_of(work, struct tcm_loop_cmd, work);
+	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
+	struct scsi_cmnd *sc = tl_cmd->sc;
+	struct tcm_loop_nexus *tl_nexus;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0, transfer_length;
+	int rc;
+
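+	/*
+	 * Scsi_Host->hostdata holds a struct tcm_loop_hba pointer, assigned
+	 * in tcm_loop_driver_probe().
+	 */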
+	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+
+	/*
+	 * Ensure that this tl_tpg reference from the incoming sc->device->id
+	 * has already been configured via tcm_loop_make_naa_tpg().
+	 */
+	if (!tl_tpg->tl_hba) {
+		set_host_byte(sc, DID_NO_CONNECT);
+		goto out_done;
+	}
+	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
+		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+		goto out_done;
+	}
+	tl_nexus = tl_tpg->tl_nexus;
+	if (!tl_nexus) {
+		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+				" does not exist\n");
+		set_host_byte(sc, DID_ERROR);
+		goto out_done;
+	}
+	if (scsi_bidi_cmnd(sc)) {
+		struct scsi_data_buffer *sdb = scsi_in(sc);
+
+		sgl_bidi = sdb->table.sgl;
+		sgl_bidi_count = sdb->table.nents;
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+	}
+
+	transfer_length = scsi_transfer_length(sc);
+	if (!scsi_prot_sg_count(sc) &&
+	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
+		se_cmd->prot_pto = true;
+		/*
+		 * loopback transport doesn't support
+		 * WRITE_GENERATE, READ_STRIP protection
+		 * information operations, go ahead unprotected.
+		 */
+		transfer_length = scsi_bufflen(sc);
+	}
+
+	se_cmd->tag = tl_cmd->sc_cmd_tag;
+	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
+			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
+			transfer_length, TCM_SIMPLE_TAG,
+			sc->sc_data_direction, 0,
+			scsi_sglist(sc), scsi_sg_count(sc),
+			sgl_bidi, sgl_bidi_count,
+			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
+	if (rc < 0) {
+		set_host_byte(sc, DID_NO_CONNECT);
+		goto out_done;
+	}
+	return;
+
+out_done:
+	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+	sc->scsi_done(sc);
+	return;
+}
+
+/*
+ * ->queuecommand can be and usually is called from interrupt context, so
+ * defer the actual submission to a workqueue.
+ */
+static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+{
+	struct tcm_loop_cmd *tl_cmd;
+
+	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
+		" scsi_buf_len: %u\n", sc->device->host->host_no,
+		sc->device->id, sc->device->channel, sc->device->lun,
+		sc->cmnd[0], scsi_bufflen(sc));
+
+	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
+	if (!tl_cmd) {
+		pr_err("Unable to allocate struct tcm_loop_cmd\n");
+		set_host_byte(sc, DID_ERROR);
+		sc->scsi_done(sc);
+		return 0;
+	}
+
+	tl_cmd->sc = sc;
+	tl_cmd->sc_cmd_tag = sc->request->tag;
+	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
+	queue_work(tcm_loop_workqueue, &tl_cmd->work);
+	return 0;
+}
+
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
+			      u64 lun, int task, enum tcm_tmreq_table tmr)
+{
+	struct se_cmd *se_cmd = NULL;
+	struct se_session *se_sess;
+	struct se_portal_group *se_tpg;
+	struct tcm_loop_nexus *tl_nexus;
+	struct tcm_loop_cmd *tl_cmd = NULL;
+	struct tcm_loop_tmr *tl_tmr = NULL;
+	int ret = TMR_FUNCTION_FAILED, rc;
+
+	/*
+	 * Locate the tl_nexus and se_sess pointers
+	 */
+	tl_nexus = tl_tpg->tl_nexus;
+	if (!tl_nexus) {
+		pr_err("Unable to perform device reset without"
+				" active I_T Nexus\n");
+		return ret;
+	}
+
+	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
+	if (!tl_cmd) {
+		pr_err("Unable to allocate memory for tl_cmd\n");
+		return ret;
+	}
+
+	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
+	if (!tl_tmr) {
+		pr_err("Unable to allocate memory for tl_tmr\n");
+		goto release;
+	}
+	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
+
+	se_cmd = &tl_cmd->tl_se_cmd;
+	se_tpg = &tl_tpg->tl_se_tpg;
+	se_sess = tl_tpg->tl_nexus->se_sess;
+	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
+				DMA_NONE, TCM_SIMPLE_TAG,
+				&tl_cmd->tl_sense_buf[0]);
+
+	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
+	if (rc < 0)
+		goto release;
+
+	if (tmr == TMR_ABORT_TASK)
+		se_cmd->se_tmr_req->ref_task_tag = task;
+
+	/*
+	 * Locate the underlying TCM struct se_lun
+	 */
+	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
+		ret = TMR_LUN_DOES_NOT_EXIST;
+		goto release;
+	}
+	/*
+	 * Queue the TMR to TCM Core and sleep waiting for
+	 * tcm_loop_queue_tm_rsp() to wake us up.
+	 */
+	transport_generic_handle_tmr(se_cmd);
+	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
+	/*
+	 * The TMR LUN_RESET has completed, check the response status and
+	 * then release allocations.
+	 */
+	ret = se_cmd->se_tmr_req->response;
+release:
+	if (se_cmd)
+		transport_generic_free_cmd(se_cmd, 1);
+	else
+		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+	kfree(tl_tmr);
+	return ret;
+}
+
+static int tcm_loop_abort_task(struct scsi_cmnd *sc)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+	int ret = FAILED;
+
+	/*
+	 * Locate the struct tcm_loop_hba pointer
+	 */
+	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
+				 sc->request->tag, TMR_ABORT_TASK);
+	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+}
+
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+	int ret = FAILED;
+
+	/*
+	 * Locate the struct tcm_loop_hba pointer
+	 */
+	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+
+	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
+				 0, TMR_LUN_RESET);
+	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+}
+
+static int tcm_loop_target_reset(struct scsi_cmnd *sc)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+
+	/*
+	 * Locate the struct tcm_loop_hba pointer
+	 */
+	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+	if (!tl_hba) {
+		pr_err("Unable to perform device reset without"
+				" active I_T Nexus\n");
+		return FAILED;
+	}
+	/*
+	 * Locate the tl_tpg pointer from TargetID in sc->device->id
+	 */
+	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+	if (tl_tpg) {
+		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+		return SUCCESS;
+	}
+	return FAILED;
+}
+
+static int tcm_loop_slave_alloc(struct scsi_device *sd)
+{
+	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
+	return 0;
+}
+
+static struct scsi_host_template tcm_loop_driver_template = {
+	.show_info		= tcm_loop_show_info,
+	.proc_name		= "tcm_loopback",
+	.name			= "TCM_Loopback",
+	.queuecommand		= tcm_loop_queuecommand,
+	.change_queue_depth	= scsi_change_queue_depth,
+	.eh_abort_handler = tcm_loop_abort_task,
+	.eh_device_reset_handler = tcm_loop_device_reset,
+	.eh_target_reset_handler = tcm_loop_target_reset,
+	.can_queue		= 1024,
+	.this_id		= -1,
+	.sg_tablesize		= 256,
+	.cmd_per_lun		= 1024,
+	.max_sectors		= 0xFFFF,
+	.use_clustering		= DISABLE_CLUSTERING,
+	.slave_alloc		= tcm_loop_slave_alloc,
+	.module			= THIS_MODULE,
+	.track_queue_depth	= 1,
+};
+
+static int tcm_loop_driver_probe(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+	int error, host_prot;
+
+	tl_hba = to_tcm_loop_hba(dev);
+
+	sh = scsi_host_alloc(&tcm_loop_driver_template,
+			sizeof(struct tcm_loop_hba));
+	if (!sh) {
+		pr_err("Unable to allocate struct scsi_host\n");
+		return -ENODEV;
+	}
+	tl_hba->sh = sh;
+
+	/*
+	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
+	 */
+	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
+	/*
+	 * Setup single ID, Channel and LUN for now..
+	 */
+	sh->max_id = 2;
+	sh->max_lun = 0;
+	sh->max_channel = 0;
+	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+
+	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+	scsi_host_set_prot(sh, host_prot);
+	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
+
+	error = scsi_add_host(sh, &tl_hba->dev);
+	if (error) {
+		pr_err("%s: scsi_add_host failed\n", __func__);
+		scsi_host_put(sh);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int tcm_loop_driver_remove(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+
+	tl_hba = to_tcm_loop_hba(dev);
+	sh = tl_hba->sh;
+
+	scsi_remove_host(sh);
+	scsi_host_put(sh);
+	return 0;
+}
+
+static void tcm_loop_release_adapter(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
+
+	kfree(tl_hba);
+}
+
+/*
+ * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
+ */
+static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
+{
+	int ret;
+
+	tl_hba->dev.bus = &tcm_loop_lld_bus;
+	tl_hba->dev.parent = tcm_loop_primary;
+	tl_hba->dev.release = &tcm_loop_release_adapter;
+	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
+
+	ret = device_register(&tl_hba->dev);
+	if (ret) {
+		pr_err("device_register() failed for"
+				" tl_hba->dev: %d\n", ret);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * Called from tcm_loop_fabric_init() to load the emulated tcm_loop
+ * SCSI bus.
+ */
+static int tcm_loop_alloc_core_bus(void)
+{
+	int ret;
+
+	tcm_loop_primary = root_device_register("tcm_loop_0");
+	if (IS_ERR(tcm_loop_primary)) {
+		pr_err("Unable to allocate tcm_loop_primary\n");
+		return PTR_ERR(tcm_loop_primary);
+	}
+
+	ret = bus_register(&tcm_loop_lld_bus);
+	if (ret) {
+		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
+		goto dev_unreg;
+	}
+
+	ret = driver_register(&tcm_loop_driverfs);
+	if (ret) {
+		pr_err("driver_register() failed for"
+				" tcm_loop_driverfs\n");
+		goto bus_unreg;
+	}
+
+	pr_debug("Initialized TCM Loop Core Bus\n");
+	return ret;
+
+bus_unreg:
+	bus_unregister(&tcm_loop_lld_bus);
+dev_unreg:
+	root_device_unregister(tcm_loop_primary);
+	return ret;
+}
+
+static void tcm_loop_release_core_bus(void)
+{
+	driver_unregister(&tcm_loop_driverfs);
+	bus_unregister(&tcm_loop_lld_bus);
+	root_device_unregister(tcm_loop_primary);
+
+	pr_debug("Releasing TCM Loop Core BUS\n");
+}
+
+static char *tcm_loop_get_fabric_name(void)
+{
+	return "loopback";
+}
+
+static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
+{
+	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
+}
+
+static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+	/*
+	 * Return the passed NAA identifier for the Target Port
+	 */
+	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
+}
+
+static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
+{
+	/*
+	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
+	 * to represent the SCSI Target Port.
+	 */
+	return tl_tpg(se_tpg)->tl_tpgt;
+}
+
+/*
+ * Returning 1 here allows a target_core_mod struct se_node_acl to be
+ * generated based upon the incoming fabric-dependent SCSI Initiator Port.
+ */
+static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/*
+ * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
+ * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
+ */
+static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/*
+ * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
+ * never be called for TCM_Loop by target_core_fabric_configfs.c code.
+ * It has been added here as a nop for target_fabric_tf_ops_check()
+ */
+static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+						   tl_se_tpg);
+	return tl_tpg->tl_fabric_prot_type;
+}
+
+static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
+{
+	return 1;
+}
+
+static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+	return;
+}
+
+static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+			struct tcm_loop_cmd, tl_se_cmd);
+
+	return tl_cmd->sc_cmd_state;
+}
+
+static int tcm_loop_shutdown_session(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void tcm_loop_close_session(struct se_session *se_sess)
+{
+	return;
+}
+
+static int tcm_loop_write_pending(struct se_cmd *se_cmd)
+{
+	/*
+	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
+	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
+	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
+	 * format with transport_generic_map_mem_to_cmd().
+	 *
+	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
+	 * object execution queue.
+	 */
+	target_execute_cmd(se_cmd);
+	return 0;
+}
+
+static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+	struct scsi_cmnd *sc = tl_cmd->sc;
+
+	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
+
+	sc->result = SAM_STAT_GOOD;
+	set_host_byte(sc, DID_OK);
+	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
+	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
+		scsi_set_resid(sc, se_cmd->residual_count);
+	sc->scsi_done(sc);
+	return 0;
+}
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+	struct scsi_cmnd *sc = tl_cmd->sc;
+
+	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
+			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
+
+	if (se_cmd->sense_buffer &&
+	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+
+		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
+				SCSI_SENSE_BUFFERSIZE);
+		sc->result = SAM_STAT_CHECK_CONDITION;
+		set_driver_byte(sc, DRIVER_SENSE);
+	} else
+		sc->result = se_cmd->scsi_status;
+
+	set_host_byte(sc, DID_OK);
+	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
+	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
+		scsi_set_resid(sc, se_cmd->residual_count);
+	sc->scsi_done(sc);
+	return 0;
+}
+
+static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
+	/*
+	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, so go
+	 * ahead and wake up the wait_queue_head_t in tcm_loop_device_reset().
+	 */
+	atomic_set(&tl_tmr->tmr_complete, 1);
+	wake_up(&tl_tmr->tl_tmr_wait);
+}
+
+static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
+static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
+{
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return "SAS";
+	case SCSI_PROTOCOL_FCP:
+		return "FCP";
+	case SCSI_PROTOCOL_ISCSI:
+		return "iSCSI";
+	default:
+		break;
+	}
+
+	return "Unknown";
+}
+
+/* Start items for tcm_loop_port_cit */
+
+static int tcm_loop_port_link(
+	struct se_portal_group *se_tpg,
+	struct se_lun *lun)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+				struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
+	/*
+	 * Add Linux/SCSI struct scsi_device by HCTL
+	 */
+	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
+
+	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
+	return 0;
+}
+
+static void tcm_loop_port_unlink(
+	struct se_portal_group *se_tpg,
+	struct se_lun *se_lun)
+{
+	struct scsi_device *sd;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+
+	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
+	tl_hba = tl_tpg->tl_hba;
+
+	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
+				se_lun->unpacked_lun);
+	if (!sd) {
+		pr_err("Unable to locate struct scsi_device for %d:%d:"
+			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
+		return;
+	}
+	/*
+	 * Remove Linux/SCSI struct scsi_device by HCTL
+	 */
+	scsi_remove_device(sd);
+	scsi_device_put(sd);
+
+	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
+
+	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
+}
+
+/* End items for tcm_loop_port_cit */
+
+static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
+		struct config_item *item, char *page)
+{
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+						   tl_se_tpg);
+
+	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
+}
+
+static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+						   tl_se_tpg);
+	unsigned long val;
+	int ret = kstrtoul(page, 0, &val);
+
+	if (ret) {
+		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+		return ret;
+	}
+	if (val != 0 && val != 1 && val != 3) {
+		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
+		return -EINVAL;
+	}
+	tl_tpg->tl_fabric_prot_type = val;
+
+	return count;
+}
+
+CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
+
+static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
+	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
+	NULL,
+};
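+
+/*
+ * The accepted fabric_prot_type values above map onto enum target_prot_type:
+ * 0 disables DIF protection, 1 selects DIF Type 1 and 3 selects DIF Type 3;
+ * Type 2 is rejected.  Illustrative configfs usage (the WWN is an example):
+ *
+ *   echo 1 > /sys/kernel/config/target/loopback/<naa_wwn>/tpgt_0/attrib/fabric_prot_type
+ */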
+
+/* Start items for tcm_loop_nexus_cit */
+
+static int tcm_loop_make_nexus(
+	struct tcm_loop_tpg *tl_tpg,
+	const char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+	struct tcm_loop_nexus *tl_nexus;
+	int ret = -ENOMEM;
+
+	if (tl_tpg->tl_nexus) {
+		pr_debug("tl_tpg->tl_nexus already exists\n");
+		return -EEXIST;
+	}
+	se_tpg = &tl_tpg->tl_se_tpg;
+
+	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
+	if (!tl_nexus) {
+		pr_err("Unable to allocate struct tcm_loop_nexus\n");
+		return -ENOMEM;
+	}
+	/*
+	 * Initialize the struct se_session pointer
+	 */
+	tl_nexus->se_sess = transport_init_session(
+				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
+	if (IS_ERR(tl_nexus->se_sess)) {
+		ret = PTR_ERR(tl_nexus->se_sess);
+		goto out;
+	}
+	/*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the tcm_loop struct se_portal_group with
+	 * the SCSI Initiator port name of the passed configfs group 'name'.
+	 */
+	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+				se_tpg, (unsigned char *)name);
+	if (!tl_nexus->se_sess->se_node_acl) {
+		transport_free_session(tl_nexus->se_sess);
+		goto out;
+	}
+	/* Now, register the I_T Nexus as active. */
+	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+			tl_nexus->se_sess, tl_nexus);
+	tl_tpg->tl_nexus = tl_nexus;
+	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+		name);
+	return 0;
+
+out:
+	kfree(tl_nexus);
+	return ret;
+}
+
+static int tcm_loop_drop_nexus(
+	struct tcm_loop_tpg *tpg)
+{
+	struct se_session *se_sess;
+	struct tcm_loop_nexus *tl_nexus;
+
+	tl_nexus = tpg->tl_nexus;
+	if (!tl_nexus)
+		return -ENODEV;
+
+	se_sess = tl_nexus->se_sess;
+	if (!se_sess)
+		return -ENODEV;
+
+	if (atomic_read(&tpg->tl_tpg_port_count)) {
+		pr_err("Unable to remove TCM_Loop I_T Nexus with"
+			" active TPG port count: %d\n",
+			atomic_read(&tpg->tl_tpg_port_count));
+		return -EPERM;
+	}
+
+	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
+		tl_nexus->se_sess->se_node_acl->initiatorname);
+	/*
+	 * Release the SCSI I_T Nexus to the emulated Target Port
+	 */
+	transport_deregister_session(tl_nexus->se_sess);
+	tpg->tl_nexus = NULL;
+	kfree(tl_nexus);
+	return 0;
+}
+
+/* End items for tcm_loop_nexus_cit */
+
+static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+			struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_nexus *tl_nexus;
+	ssize_t ret;
+
+	tl_nexus = tl_tpg->tl_nexus;
+	if (!tl_nexus)
+		return -ENODEV;
+
+	ret = snprintf(page, PAGE_SIZE, "%s\n",
+		tl_nexus->se_sess->se_node_acl->initiatorname);
+
+	return ret;
+}
+
+static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+			struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
+	int ret;
+	/*
+	 * Shut down the active I_T nexus if 'NULL' is passed.
+	 */
+	if (!strncmp(page, "NULL", 4)) {
+		ret = tcm_loop_drop_nexus(tl_tpg);
+		return (!ret) ? count : ret;
+	}
+	/*
+	 * Otherwise make sure the passed virtual Initiator port WWN matches
+	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
+	 * tcm_loop_make_nexus()
+	 */
+	if (strlen(page) >= TL_WWN_ADDR_LEN) {
+		pr_err("Emulated NAA Sas Address: %s, exceeds"
+				" max: %d\n", page, TL_WWN_ADDR_LEN);
+		return -EINVAL;
+	}
+	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
+
+	ptr = strstr(i_port, "naa.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
+			pr_err("Passed SAS Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "fc.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
+			pr_err("Passed FCP Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[3]; /* Skip over "fc." */
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "iqn.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
+			pr_err("Passed iSCSI Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
+			" %s\n", i_port);
+	return -EINVAL;
+	/*
+	 * Clear any trailing newline for the NAA WWN
+	 */
+check_newline:
+	if (i_port[strlen(i_port)-1] == '\n')
+		i_port[strlen(i_port)-1] = '\0';
+
+	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
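+
+/*
+ * Illustrative configfs usage for the nexus attribute (the WWN shown is an
+ * example only); the prefix must match the HBA protocol, e.g. for SAS:
+ *
+ *   echo naa.60014051234567890 > \
+ *     /sys/kernel/config/target/loopback/<naa_wwn>/tpgt_0/nexus
+ *
+ * Writing the literal string "NULL" drops the active I_T nexus instead.
+ */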
+
+static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
+		char *page)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+			struct tcm_loop_tpg, tl_se_tpg);
+	const char *status = NULL;
+	ssize_t ret = -EINVAL;
+
+	switch (tl_tpg->tl_transport_status) {
+	case TCM_TRANSPORT_ONLINE:
+		status = "online";
+		break;
+	case TCM_TRANSPORT_OFFLINE:
+		status = "offline";
+		break;
+	default:
+		break;
+	}
+
+	if (status)
+		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
+
+	return ret;
+}
+
+static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+			struct tcm_loop_tpg, tl_se_tpg);
+
+	if (!strncmp(page, "online", 6)) {
+		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+		return count;
+	}
+	if (!strncmp(page, "offline", 7)) {
+		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
+		if (tl_tpg->tl_nexus) {
+			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
+
+			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
+		}
+		return count;
+	}
+	return -EINVAL;
+}
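+
+/*
+ * Note that writing "offline" above also raises a nexus-loss Unit Attention
+ * on any active I_T nexus via core_allocate_nexus_loss_ua(), so the SCSI
+ * initiator side is told about the transport state change.
+ */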
+
+CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
+CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
+
+static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
+	&tcm_loop_tpg_attr_nexus,
+	&tcm_loop_tpg_attr_transport_status,
+	NULL,
+};
+
+/* Start items for tcm_loop_naa_cit */
+
+static struct se_portal_group *tcm_loop_make_naa_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_loop_hba *tl_hba = container_of(wwn,
+			struct tcm_loop_hba, tl_hba_wwn);
+	struct tcm_loop_tpg *tl_tpg;
+	int ret;
+	unsigned long tpgt;
+
+	if (strstr(name, "tpgt_") != name) {
+		pr_err("Unable to locate \"tpgt_#\" directory"
+				" group\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if (kstrtoul(name+5, 10, &tpgt))
+		return ERR_PTR(-EINVAL);
+
+	if (tpgt >= TL_TPGS_PER_HBA) {
+		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
+				" %u\n", tpgt, TL_TPGS_PER_HBA);
+		return ERR_PTR(-EINVAL);
+	}
+	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
+	tl_tpg->tl_hba = tl_hba;
+	tl_tpg->tl_tpgt = tpgt;
+	/*
+	 * Register the tl_tpg as an emulated TCM Target Endpoint
+	 */
+	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
+		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
+		config_item_name(&wwn->wwn_group.cg_item), tpgt);
+
+	return &tl_tpg->tl_se_tpg;
+}
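+
+/*
+ * Illustrative configfs flow (the WWN is an example only): a mkdir of
+ * /sys/kernel/config/target/loopback/naa.60014051234567890/tpgt_0 reaches
+ * tcm_loop_make_naa_tpg() above with name "tpgt_0".
+ */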
+
+static void tcm_loop_drop_naa_tpg(
+	struct se_portal_group *se_tpg)
+{
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+				struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba;
+	unsigned short tpgt;
+
+	tl_hba = tl_tpg->tl_hba;
+	tpgt = tl_tpg->tl_tpgt;
+	/*
+	 * Release the I_T Nexus for the Virtual target link if present
+	 */
+	tcm_loop_drop_nexus(tl_tpg);
+	/*
+	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
+	 */
+	core_tpg_deregister(se_tpg);
+
+	tl_tpg->tl_hba = NULL;
+	tl_tpg->tl_tpgt = 0;
+
+	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
+		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+		config_item_name(&wwn->wwn_group.cg_item), tpgt);
+}
+
+/* End items for tcm_loop_naa_cit */
+
+/* Start items for tcm_loop_cit */
+
+static struct se_wwn *tcm_loop_make_scsi_hba(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+	char *ptr;
+	int ret, off = 0;
+
+	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
+	if (!tl_hba) {
+		pr_err("Unable to allocate struct tcm_loop_hba\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	/*
+	 * Determine the emulated Protocol Identifier and Target Port Name
+	 * based on the incoming configfs directory name.
+	 */
+	ptr = strstr(name, "naa.");
+	if (ptr) {
+		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
+		goto check_len;
+	}
+	ptr = strstr(name, "fc.");
+	if (ptr) {
+		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
+		off = 3; /* Skip over "fc." */
+		goto check_len;
+	}
+	ptr = strstr(name, "iqn.");
+	if (!ptr) {
+		pr_err("Unable to locate prefix for emulated Target "
+				"Port: %s\n", name);
+		ret = -EINVAL;
+		goto out;
+	}
+	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
+
+check_len:
+	if (strlen(name) >= TL_WWN_ADDR_LEN) {
+		pr_err("Emulated NAA %s Address: %s, exceeds"
+			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
+			TL_WWN_ADDR_LEN);
+		ret = -EINVAL;
+		goto out;
+	}
+	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
+
+	/*
+	 * Call device_register(tl_hba->dev) to register the emulated
+	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
+	 * device_register() callbacks in tcm_loop_driver_probe()
+	 */
+	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
+	if (ret)
+		goto out;
+
+	sh = tl_hba->sh;
+	tcm_loop_hba_no_cnt++;
+	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
+		" %s Address: %s at Linux/SCSI Host ID: %d\n",
+		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
+
+	return &tl_hba->tl_hba_wwn;
+out:
+	kfree(tl_hba);
+	return ERR_PTR(ret);
+}
+
+static void tcm_loop_drop_scsi_hba(
+	struct se_wwn *wwn)
+{
+	struct tcm_loop_hba *tl_hba = container_of(wwn,
+				struct tcm_loop_hba, tl_hba_wwn);
+
+	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+		" %s Address: %s at Linux/SCSI Host ID: %d\n",
+		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
+		tl_hba->sh->host_no);
+	/*
+	 * Call device_unregister() on the original tl_hba->dev.
+	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
+	 * release *tl_hba;
+	 */
+	device_unregister(&tl_hba->dev);
+}
+
+static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
+}
+
+CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
+
+static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
+	&tcm_loop_wwn_attr_version,
+	NULL,
+};
+
+/* End items for tcm_loop_cit */
+
+static const struct target_core_fabric_ops loop_ops = {
+	.module				= THIS_MODULE,
+	.name				= "loopback",
+	.get_fabric_name		= tcm_loop_get_fabric_name,
+	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
+	.tpg_get_tag			= tcm_loop_get_tag,
+	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
+	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect =
+				tcm_loop_check_demo_mode_write_protect,
+	.tpg_check_prod_mode_write_protect =
+				tcm_loop_check_prod_mode_write_protect,
+	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
+	.tpg_get_inst_index		= tcm_loop_get_inst_index,
+	.check_stop_free		= tcm_loop_check_stop_free,
+	.release_cmd			= tcm_loop_release_cmd,
+	.shutdown_session		= tcm_loop_shutdown_session,
+	.close_session			= tcm_loop_close_session,
+	.sess_get_index			= tcm_loop_sess_get_index,
+	.write_pending			= tcm_loop_write_pending,
+	.write_pending_status		= tcm_loop_write_pending_status,
+	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
+	.get_cmd_state			= tcm_loop_get_cmd_state,
+	.queue_data_in			= tcm_loop_queue_data_in,
+	.queue_status			= tcm_loop_queue_status,
+	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
+	.aborted_task			= tcm_loop_aborted_task,
+	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
+	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
+	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
+	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
+	.fabric_post_link		= tcm_loop_port_link,
+	.fabric_pre_unlink		= tcm_loop_port_unlink,
+	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
+	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
+	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
+};
+
+static int __init tcm_loop_fabric_init(void)
+{
+	int ret = -ENOMEM;
+
+	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
+	if (!tcm_loop_workqueue)
+		goto out;
+
+	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
+				sizeof(struct tcm_loop_cmd),
+				__alignof__(struct tcm_loop_cmd),
+				0, NULL);
+	if (!tcm_loop_cmd_cache) {
+		pr_debug("kmem_cache_create() for"
+			" tcm_loop_cmd_cache failed\n");
+		goto out_destroy_workqueue;
+	}
+
+	ret = tcm_loop_alloc_core_bus();
+	if (ret)
+		goto out_destroy_cache;
+
+	ret = target_register_template(&loop_ops);
+	if (ret)
+		goto out_release_core_bus;
+
+	return 0;
+
+out_release_core_bus:
+	tcm_loop_release_core_bus();
+out_destroy_cache:
+	kmem_cache_destroy(tcm_loop_cmd_cache);
+out_destroy_workqueue:
+	destroy_workqueue(tcm_loop_workqueue);
+out:
+	return ret;
+}
+
+static void __exit tcm_loop_fabric_exit(void)
+{
+	target_unregister_template(&loop_ops);
+	tcm_loop_release_core_bus();
+	kmem_cache_destroy(tcm_loop_cmd_cache);
+	destroy_workqueue(tcm_loop_workqueue);
+}
+
+MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
+MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
+MODULE_LICENSE("GPL");
+module_init(tcm_loop_fabric_init);
+module_exit(tcm_loop_fabric_exit);
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
new file mode 100644
index 0000000..4346462
--- /dev/null
+++ b/drivers/target/loopback/tcm_loop.h
@@ -0,0 +1,54 @@
+#define TCM_LOOP_VERSION		"v2.1-rc2"
+#define TL_WWN_ADDR_LEN			256
+#define TL_TPGS_PER_HBA			32
+
+struct tcm_loop_cmd {
+	/* State of Linux/SCSI CDB+Data descriptor */
+	u32 sc_cmd_state;
+	/* Tagged command queueing */
+	u32 sc_cmd_tag;
+	/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
+	struct scsi_cmnd *sc;
+	/* The TCM I/O descriptor that is accessed via container_of() */
+	struct se_cmd tl_se_cmd;
+	struct work_struct work;
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
+};
+
+struct tcm_loop_tmr {
+	atomic_t tmr_complete;
+	wait_queue_head_t tl_tmr_wait;
+};
+
+struct tcm_loop_nexus {
+	/*
+	 * Pointer to TCM session for I_T Nexus
+	 */
+	struct se_session *se_sess;
+};
+
+#define TCM_TRANSPORT_ONLINE 0
+#define TCM_TRANSPORT_OFFLINE 1
+
+struct tcm_loop_tpg {
+	unsigned short tl_tpgt;
+	unsigned short tl_transport_status;
+	enum target_prot_type tl_fabric_prot_type;
+	atomic_t tl_tpg_port_count;
+	struct se_portal_group tl_se_tpg;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_nexus *tl_nexus;
+};
+
+struct tcm_loop_hba {
+	u8 tl_proto_id;
+	unsigned char tl_wwn_address[TL_WWN_ADDR_LEN];
+	struct se_hba_s *se_hba;
+	struct se_lun *tl_hba_lun;
+	struct se_port *tl_hba_lun_sep;
+	struct device dev;
+	struct Scsi_Host *sh;
+	struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
+	struct se_wwn tl_hba_wwn;
+};
diff --git a/drivers/target/sbp/Kconfig b/drivers/target/sbp/Kconfig
new file mode 100644
index 0000000..1614bc7
--- /dev/null
+++ b/drivers/target/sbp/Kconfig
@@ -0,0 +1,11 @@
+config SBP_TARGET
+	tristate "FireWire SBP-2 fabric module"
+	depends on FIREWIRE
+	help
+	  Say Y or M here to enable SCSI target functionality over FireWire.
+	  This enables you to expose SCSI devices to other nodes on the FireWire
+	  bus, for example hard disks. Similar to FireWire Target Disk mode on
+	  many Apple computers.
+
+	  To compile this driver as a module, say M here: The module will be
+	  called sbp-target.
diff --git a/drivers/target/sbp/Makefile b/drivers/target/sbp/Makefile
new file mode 100644
index 0000000..27747ad
--- /dev/null
+++ b/drivers/target/sbp/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SBP_TARGET) += sbp_target.o
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
new file mode 100644
index 0000000..35f7d31
--- /dev/null
+++ b/drivers/target/sbp/sbp_target.c
@@ -0,0 +1,2386 @@
+/*
+ * SBP2 target driver (SCSI over IEEE1394 in target mode)
+ *
+ * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define KMSG_COMPONENT "sbp_target"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <asm/unaligned.h>
+
+#include "sbp_target.h"
+
+static const struct target_core_fabric_ops sbp_ops;
+
+/* FireWire address region for management and command block address handlers */
+static const struct fw_address_region sbp_register_region = {
+	.start	= CSR_REGISTER_BASE + 0x10000,
+	.end	= 0x1000000000000ULL,
+};
+
+static const u32 sbp_unit_directory_template[] = {
+	0x1200609e, /* unit_specifier_id: NCITS/T10 */
+	0x13010483, /* unit_sw_version: 1155D Rev 4 */
+	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
+	0x390104d8, /* command_set: SPC-2 */
+	0x3b000000, /* command_set_revision: 0 */
+	0x3c000001, /* firmware_revision: 1 */
+};
+
+#define SESSION_MAINTENANCE_INTERVAL HZ
+
+static atomic_t login_id = ATOMIC_INIT(0);
+
+static void session_maintenance_work(struct work_struct *);
+static int sbp_run_transaction(struct fw_card *, int, int, int, int,
+		unsigned long long, void *, size_t);
+
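+/*
+ * Read the peer's EUI-64 GUID out of its Configuration ROM: quadlets 3 and 4
+ * of the bus information block hold the high and low halves of the GUID.
+ */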
+static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
+{
+	int ret;
+	__be32 high, low;
+
+	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
+			req->node_addr, req->generation, req->speed,
+			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
+			&high, sizeof(high));
+	if (ret != RCODE_COMPLETE)
+		return ret;
+
+	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
+			req->node_addr, req->generation, req->speed,
+			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
+			&low, sizeof(low));
+	if (ret != RCODE_COMPLETE)
+		return ret;
+
+	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
+
+	return RCODE_COMPLETE;
+}
+
+static struct sbp_session *sbp_session_find_by_guid(
+	struct sbp_tpg *tpg, u64 guid)
+{
+	struct se_session *se_sess;
+	struct sbp_session *sess, *found = NULL;
+
+	spin_lock_bh(&tpg->se_tpg.session_lock);
+	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess->guid == guid)
+			found = sess;
+	}
+	spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+	return found;
+}
+
+static struct sbp_login_descriptor *sbp_login_find_by_lun(
+		struct sbp_session *session, u32 unpacked_lun)
+{
+	struct sbp_login_descriptor *login, *found = NULL;
+
+	spin_lock_bh(&session->lock);
+	list_for_each_entry(login, &session->login_list, link) {
+		if (login->login_lun == unpacked_lun)
+			found = login;
+	}
+	spin_unlock_bh(&session->lock);
+
+	return found;
+}
+
+static int sbp_login_count_all_by_lun(
+		struct sbp_tpg *tpg,
+		u32 unpacked_lun,
+		int exclusive)
+{
+	struct se_session *se_sess;
+	struct sbp_session *sess;
+	struct sbp_login_descriptor *login;
+	int count = 0;
+
+	spin_lock_bh(&tpg->se_tpg.session_lock);
+	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+		sess = se_sess->fabric_sess_ptr;
+
+		spin_lock_bh(&sess->lock);
+		list_for_each_entry(login, &sess->login_list, link) {
+			if (login->login_lun != unpacked_lun)
+				continue;
+
+			if (!exclusive || login->exclusive)
+				count++;
+		}
+		spin_unlock_bh(&sess->lock);
+	}
+	spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+	return count;
+}
+
+static struct sbp_login_descriptor *sbp_login_find_by_id(
+	struct sbp_tpg *tpg, int login_id)
+{
+	struct se_session *se_sess;
+	struct sbp_session *sess;
+	struct sbp_login_descriptor *login, *found = NULL;
+
+	spin_lock_bh(&tpg->se_tpg.session_lock);
+	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+		sess = se_sess->fabric_sess_ptr;
+
+		spin_lock_bh(&sess->lock);
+		list_for_each_entry(login, &sess->login_list, link) {
+			if (login->login_id == login_id)
+				found = login;
+		}
+		spin_unlock_bh(&sess->lock);
+	}
+	spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+	return found;
+}
+
+static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
+{
+	struct se_portal_group *se_tpg = &tpg->se_tpg;
+	struct se_lun *se_lun;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
+		if (se_lun->unpacked_lun == login_lun) {
+			rcu_read_unlock();
+			*err = 0;
+			return login_lun;
+		}
+	}
+	rcu_read_unlock();
+
+	*err = -ENODEV;
+	return login_lun;
+}
+
+static struct sbp_session *sbp_session_create(
+		struct sbp_tpg *tpg,
+		u64 guid)
+{
+	struct sbp_session *sess;
+	int ret;
+	char guid_str[17];
+	struct se_node_acl *se_nacl;
+
+	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
+	if (!sess) {
+		pr_err("failed to allocate session descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+	if (IS_ERR(sess->se_sess)) {
+		pr_err("failed to init se_session\n");
+
+		ret = PTR_ERR(sess->se_sess);
+		kfree(sess);
+		return ERR_PTR(ret);
+	}
+
+	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
+
+	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
+	if (!se_nacl) {
+		pr_warn("Node ACL not found for %s\n", guid_str);
+
+		transport_free_session(sess->se_sess);
+		kfree(sess);
+
+		return ERR_PTR(-EPERM);
+	}
+
+	sess->se_sess->se_node_acl = se_nacl;
+
+	spin_lock_init(&sess->lock);
+	INIT_LIST_HEAD(&sess->login_list);
+	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+
+	sess->guid = guid;
+
+	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
+
+	return sess;
+}
+
+static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
+{
+	spin_lock_bh(&sess->lock);
+	if (!list_empty(&sess->login_list)) {
+		spin_unlock_bh(&sess->lock);
+		return;
+	}
+	spin_unlock_bh(&sess->lock);
+
+	if (cancel_work)
+		cancel_delayed_work_sync(&sess->maint_work);
+
+	transport_deregister_session_configfs(sess->se_sess);
+	transport_deregister_session(sess->se_sess);
+
+	if (sess->card)
+		fw_card_put(sess->card);
+
+	kfree(sess);
+}
+
+static void sbp_target_agent_unregister(struct sbp_target_agent *);
+
+static void sbp_login_release(struct sbp_login_descriptor *login,
+	bool cancel_work)
+{
+	struct sbp_session *sess = login->sess;
+
+	/* FIXME: abort/wait on tasks */
+
+	sbp_target_agent_unregister(login->tgt_agt);
+
+	if (sess) {
+		spin_lock_bh(&sess->lock);
+		list_del(&login->link);
+		spin_unlock_bh(&sess->lock);
+
+		sbp_session_release(sess, cancel_work);
+	}
+
+	kfree(login);
+}
+
+static struct sbp_target_agent *sbp_target_agent_register(
+	struct sbp_login_descriptor *);
+
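+/*
+ * Handle a LOGIN management ORB: validate the requested LUN, read the
+ * initiator's GUID, treat a repeated LOGIN from the same GUID/LUN like a
+ * reconnect, enforce the exclusive-login rules and the per-LUN login limit,
+ * then register a command block agent and write the login response block
+ * back to the initiator.
+ */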
+static void sbp_management_request_login(
+	struct sbp_management_agent *agent, struct sbp_management_request *req,
+	int *status_data_size)
+{
+	struct sbp_tport *tport = agent->tport;
+	struct sbp_tpg *tpg = tport->tpg;
+	struct sbp_session *sess;
+	struct sbp_login_descriptor *login;
+	struct sbp_login_response_block *response;
+	u64 guid;
+	u32 unpacked_lun;
+	int login_response_len, ret;
+
+	unpacked_lun = sbp_get_lun_from_tpg(tpg,
+			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
+	if (ret) {
+		pr_notice("login to unknown LUN: %d\n",
+			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
+		return;
+	}
+
+	ret = read_peer_guid(&guid, req);
+	if (ret != RCODE_COMPLETE) {
+		pr_warn("failed to read peer GUID: %d\n", ret);
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+		return;
+	}
+
+	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
+		unpacked_lun, guid);
+
+	sess = sbp_session_find_by_guid(tpg, guid);
+	if (sess) {
+		login = sbp_login_find_by_lun(sess, unpacked_lun);
+		if (login) {
+			pr_notice("initiator already logged-in\n");
+
+			/*
+			 * SBP-2 R4 says we should return access denied, but
+			 * that can confuse initiators. Instead we need to
+			 * treat this like a reconnect, but send the login
+			 * response block like a fresh login.
+			 *
+			 * This is required particularly in the case of Apple
+			 * devices booting off the FireWire target, where
+			 * the firmware has an active login to the target. When
+			 * the OS takes control of the session it issues its own
+			 * LOGIN rather than a RECONNECT. To avoid the machine
+			 * waiting until the reconnect_hold expires, we can skip
+			 * the ACCESS_DENIED errors to speed things up.
+			 */
+
+			goto already_logged_in;
+		}
+	}
+
+	/*
+	 * check exclusive bit in login request
+	 * reject with access_denied if any logins present
+	 */
+	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
+			sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
+		pr_warn("refusing exclusive login with other active logins\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+		return;
+	}
+
+	/*
+	 * check exclusive bit in any existing login descriptor
+	 * reject with access_denied if any exclusive logins present
+	 */
+	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
+		pr_warn("refusing login while another exclusive login present\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+		return;
+	}
+
+	/*
+	 * check we haven't exceeded the number of allowed logins
+	 * reject with resources_unavailable if we have
+	 */
+	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
+			tport->max_logins_per_lun) {
+		pr_warn("max number of logins reached\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+		return;
+	}
+
+	if (!sess) {
+		sess = sbp_session_create(tpg, guid);
+		if (IS_ERR(sess)) {
+			switch (PTR_ERR(sess)) {
+			case -EPERM:
+				ret = SBP_STATUS_ACCESS_DENIED;
+				break;
+			default:
+				ret = SBP_STATUS_RESOURCES_UNAVAIL;
+				break;
+			}
+
+			req->status.status = cpu_to_be32(
+				STATUS_BLOCK_RESP(
+					STATUS_RESP_REQUEST_COMPLETE) |
+				STATUS_BLOCK_SBP_STATUS(ret));
+			return;
+		}
+
+		sess->node_id = req->node_addr;
+		sess->card = fw_card_get(req->card);
+		sess->generation = req->generation;
+		sess->speed = req->speed;
+
+		schedule_delayed_work(&sess->maint_work,
+				SESSION_MAINTENANCE_INTERVAL);
+	}
+
+	/* only take the latest reconnect_hold into account */
+	sess->reconnect_hold = min(
+		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
+		tport->max_reconnect_timeout) - 1;
+
+	login = kmalloc(sizeof(*login), GFP_KERNEL);
+	if (!login) {
+		pr_err("failed to allocate login descriptor\n");
+
+		sbp_session_release(sess, true);
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+		return;
+	}
+
+	login->sess = sess;
+	login->login_lun = unpacked_lun;
+	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
+	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
+	login->login_id = atomic_inc_return(&login_id);
+
+	login->tgt_agt = sbp_target_agent_register(login);
+	if (IS_ERR(login->tgt_agt)) {
+		ret = PTR_ERR(login->tgt_agt);
+		pr_err("failed to map command block handler: %d\n", ret);
+
+		sbp_session_release(sess, true);
+		kfree(login);
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+		return;
+	}
+
+	spin_lock_bh(&sess->lock);
+	list_add_tail(&login->link, &sess->login_list);
+	spin_unlock_bh(&sess->lock);
+
+already_logged_in:
+	response = kzalloc(sizeof(*response), GFP_KERNEL);
+	if (!response) {
+		pr_err("failed to allocate login response block\n");
+
+		sbp_login_release(login, true);
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+		return;
+	}
+
+	login_response_len = clamp_val(
+			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
+			12, sizeof(*response));
+	response->misc = cpu_to_be32(
+		((login_response_len & 0xffff) << 16) |
+		(login->login_id & 0xffff));
+	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
+	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
+		&response->command_block_agent);
+
+	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
+		sess->node_id, sess->generation, sess->speed,
+		sbp2_pointer_to_addr(&req->orb.ptr2), response,
+		login_response_len);
+	if (ret != RCODE_COMPLETE) {
+		pr_debug("failed to write login response block: %x\n", ret);
+
+		kfree(response);
+		sbp_login_release(login, true);
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+		return;
+	}
+
+	kfree(response);
+
+	req->status.status = cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static void sbp_management_request_query_logins(
+	struct sbp_management_agent *agent, struct sbp_management_request *req,
+	int *status_data_size)
+{
+	pr_notice("QUERY LOGINS not implemented\n");
+	/* FIXME: implement */
+
+	req->status.status = cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+}
+
+static void sbp_management_request_reconnect(
+	struct sbp_management_agent *agent, struct sbp_management_request *req,
+	int *status_data_size)
+{
+	struct sbp_tport *tport = agent->tport;
+	struct sbp_tpg *tpg = tport->tpg;
+	int ret;
+	u64 guid;
+	struct sbp_login_descriptor *login;
+
+	ret = read_peer_guid(&guid, req);
+	if (ret != RCODE_COMPLETE) {
+		pr_warn("failed to read peer GUID: %d\n", ret);
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+		return;
+	}
+
+	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
+
+	login = sbp_login_find_by_id(tpg,
+		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
+
+	if (!login) {
+		pr_err("mgt_agent RECONNECT unknown login ID\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+		return;
+	}
+
+	if (login->sess->guid != guid) {
+		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+		return;
+	}
+
+	spin_lock_bh(&login->sess->lock);
+	if (login->sess->card)
+		fw_card_put(login->sess->card);
+
+	/* update the node details */
+	login->sess->generation = req->generation;
+	login->sess->node_id = req->node_addr;
+	login->sess->card = fw_card_get(req->card);
+	login->sess->speed = req->speed;
+	spin_unlock_bh(&login->sess->lock);
+
+	req->status.status = cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static void sbp_management_request_logout(
+	struct sbp_management_agent *agent, struct sbp_management_request *req,
+	int *status_data_size)
+{
+	struct sbp_tport *tport = agent->tport;
+	struct sbp_tpg *tpg = tport->tpg;
+	int id;
+	struct sbp_login_descriptor *login;
+
+	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+
+	login = sbp_login_find_by_id(tpg, id);
+	if (!login) {
+		pr_warn("cannot find login: %d\n", id);
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
+		return;
+	}
+
+	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
+		login->login_lun, login->login_id);
+
+	if (req->node_addr != login->sess->node_id) {
+		pr_warn("logout from different node ID\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+		return;
+	}
+
+	sbp_login_release(login, true);
+
+	req->status.status = cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static void session_check_for_reset(struct sbp_session *sess)
+{
+	bool card_valid = false;
+
+	spin_lock_bh(&sess->lock);
+
+	if (sess->card) {
+		spin_lock_irq(&sess->card->lock);
+		card_valid = (sess->card->local_node != NULL);
+		spin_unlock_irq(&sess->card->lock);
+
+		if (!card_valid) {
+			fw_card_put(sess->card);
+			sess->card = NULL;
+		}
+	}
+
+	if (!card_valid || (sess->generation != sess->card->generation)) {
+		pr_info("Waiting for reconnect from node: %016llx\n",
+				sess->guid);
+
+		sess->node_id = -1;
+		sess->reconnect_expires = get_jiffies_64() +
+			((sess->reconnect_hold + 1) * HZ);
+	}
+
+	spin_unlock_bh(&sess->lock);
+}
+
+static void session_reconnect_expired(struct sbp_session *sess)
+{
+	struct sbp_login_descriptor *login, *temp;
+	LIST_HEAD(login_list);
+
+	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
+
+	spin_lock_bh(&sess->lock);
+	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
+		login->sess = NULL;
+		list_move_tail(&login->link, &login_list);
+	}
+	spin_unlock_bh(&sess->lock);
+
+	list_for_each_entry_safe(login, temp, &login_list, link) {
+		list_del(&login->link);
+		sbp_login_release(login, false);
+	}
+
+	sbp_session_release(sess, false);
+}
+
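+/*
+ * Periodic per-session housekeeping: while logins exist, either watch for a
+ * bus reset (which invalidates node_id), keep waiting out the reconnect_hold
+ * window, or tear the session down once the reconnect timeout has expired.
+ */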
+static void session_maintenance_work(struct work_struct *work)
+{
+	struct sbp_session *sess = container_of(work, struct sbp_session,
+			maint_work.work);
+
+	/* could be called while tearing down the session */
+	spin_lock_bh(&sess->lock);
+	if (list_empty(&sess->login_list)) {
+		spin_unlock_bh(&sess->lock);
+		return;
+	}
+	spin_unlock_bh(&sess->lock);
+
+	if (sess->node_id != -1) {
+		/* check for bus reset and make node_id invalid */
+		session_check_for_reset(sess);
+
+		schedule_delayed_work(&sess->maint_work,
+				SESSION_MAINTENANCE_INTERVAL);
+	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
+		/* still waiting for reconnect */
+		schedule_delayed_work(&sess->maint_work,
+				SESSION_MAINTENANCE_INTERVAL);
+	} else {
+		/* reconnect timeout has expired */
+		session_reconnect_expired(sess);
+	}
+}
+
+static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
+		struct sbp_target_agent *agent)
+{
+	int state;
+
+	switch (tcode) {
+	case TCODE_READ_QUADLET_REQUEST:
+		pr_debug("tgt_agent AGENT_STATE READ\n");
+
+		spin_lock_bh(&agent->lock);
+		state = agent->state;
+		spin_unlock_bh(&agent->lock);
+
+		*(__be32 *)data = cpu_to_be32(state);
+
+		return RCODE_COMPLETE;
+
+	case TCODE_WRITE_QUADLET_REQUEST:
+		/* ignored */
+		return RCODE_COMPLETE;
+
+	default:
+		return RCODE_TYPE_ERROR;
+	}
+}
+
+static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
+		struct sbp_target_agent *agent)
+{
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+		pr_debug("tgt_agent AGENT_RESET\n");
+		spin_lock_bh(&agent->lock);
+		agent->state = AGENT_STATE_RESET;
+		spin_unlock_bh(&agent->lock);
+		return RCODE_COMPLETE;
+
+	default:
+		return RCODE_TYPE_ERROR;
+	}
+}
+
+static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
+		struct sbp_target_agent *agent)
+{
+	struct sbp2_pointer *ptr = data;
+
+	switch (tcode) {
+	case TCODE_WRITE_BLOCK_REQUEST:
+		spin_lock_bh(&agent->lock);
+		if (agent->state != AGENT_STATE_SUSPENDED &&
+				agent->state != AGENT_STATE_RESET) {
+			spin_unlock_bh(&agent->lock);
+			pr_notice("Ignoring ORB_POINTER write while active.\n");
+			return RCODE_CONFLICT_ERROR;
+		}
+		agent->state = AGENT_STATE_ACTIVE;
+		spin_unlock_bh(&agent->lock);
+
+		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
+		agent->doorbell = false;
+
+		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
+				agent->orb_pointer);
+
+		queue_work(system_unbound_wq, &agent->work);
+
+		return RCODE_COMPLETE;
+
+	case TCODE_READ_BLOCK_REQUEST:
+		pr_debug("tgt_agent ORB_POINTER READ\n");
+		spin_lock_bh(&agent->lock);
+		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
+		spin_unlock_bh(&agent->lock);
+		return RCODE_COMPLETE;
+
+	default:
+		return RCODE_TYPE_ERROR;
+	}
+}
+
+static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
+		struct sbp_target_agent *agent)
+{
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+		spin_lock_bh(&agent->lock);
+		if (agent->state != AGENT_STATE_SUSPENDED) {
+			spin_unlock_bh(&agent->lock);
+			pr_debug("Ignoring DOORBELL while active.\n");
+			return RCODE_CONFLICT_ERROR;
+		}
+		agent->state = AGENT_STATE_ACTIVE;
+		spin_unlock_bh(&agent->lock);
+
+		agent->doorbell = true;
+
+		pr_debug("tgt_agent DOORBELL\n");
+
+		queue_work(system_unbound_wq, &agent->work);
+
+		return RCODE_COMPLETE;
+
+	case TCODE_READ_QUADLET_REQUEST:
+		return RCODE_COMPLETE;
+
+	default:
+		return RCODE_TYPE_ERROR;
+	}
+}
+
+static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
+		int tcode, void *data, struct sbp_target_agent *agent)
+{
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
+		/* ignored as we don't send unsolicited status */
+		return RCODE_COMPLETE;
+
+	case TCODE_READ_QUADLET_REQUEST:
+		return RCODE_COMPLETE;
+
+	default:
+		return RCODE_TYPE_ERROR;
+	}
+}
+
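+/*
+ * Dispatch reads and writes to the per-login command block agent register
+ * file (0x20 bytes): AGENT_STATE at 0x00, AGENT_RESET at 0x04, ORB_POINTER
+ * at 0x08, DOORBELL at 0x10 and UNSOLICITED_STATUS_ENABLE at 0x14, after
+ * checking that the request comes from the logged-in node and generation.
+ */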
+static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
+		int tcode, int destination, int source, int generation,
+		unsigned long long offset, void *data, size_t length,
+		void *callback_data)
+{
+	struct sbp_target_agent *agent = callback_data;
+	struct sbp_session *sess = agent->login->sess;
+	int sess_gen, sess_node, rcode;
+
+	spin_lock_bh(&sess->lock);
+	sess_gen = sess->generation;
+	sess_node = sess->node_id;
+	spin_unlock_bh(&sess->lock);
+
+	if (generation != sess_gen) {
+		pr_notice("ignoring request with wrong generation\n");
+		rcode = RCODE_TYPE_ERROR;
+		goto out;
+	}
+
+	if (source != sess_node) {
+		pr_notice("ignoring request from foreign node (%x != %x)\n",
+				source, sess_node);
+		rcode = RCODE_TYPE_ERROR;
+		goto out;
+	}
+
+	/* turn offset into the offset from the start of the block */
+	offset -= agent->handler.offset;
+
+	if (offset == 0x00 && length == 4) {
+		/* AGENT_STATE */
+		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
+	} else if (offset == 0x04 && length == 4) {
+		/* AGENT_RESET */
+		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
+	} else if (offset == 0x08 && length == 8) {
+		/* ORB_POINTER */
+		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
+	} else if (offset == 0x10 && length == 4) {
+		/* DOORBELL */
+		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
+	} else if (offset == 0x14 && length == 4) {
+		/* UNSOLICITED_STATUS_ENABLE */
+		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
+				data, agent);
+	} else {
+		rcode = RCODE_ADDRESS_ERROR;
+	}
+
+out:
+	fw_send_response(card, request, rcode);
+}
+
+static void sbp_handle_command(struct sbp_target_request *);
+static int sbp_send_status(struct sbp_target_request *);
+static void sbp_free_request(struct sbp_target_request *);
+
+static void tgt_agent_process_work(struct work_struct *work)
+{
+	struct sbp_target_request *req =
+		container_of(work, struct sbp_target_request, work);
+
+	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
+			req->orb_pointer,
+			sbp2_pointer_to_addr(&req->orb.next_orb),
+			sbp2_pointer_to_addr(&req->orb.data_descriptor),
+			be32_to_cpu(req->orb.misc));
+
+	if (req->orb_pointer >> 32)
+		pr_debug("ORB with high bits set\n");
+
+	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
+	case 0: /* Format specified by this standard */
+		sbp_handle_command(req);
+		return;
+	case 1: /* Reserved for future standardization */
+	case 2: /* Vendor-dependent */
+		req->status.status |= cpu_to_be32(
+				STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+				STATUS_BLOCK_DEAD(0) |
+				STATUS_BLOCK_LEN(1) |
+				STATUS_BLOCK_SBP_STATUS(
+					SBP_STATUS_REQ_TYPE_NOTSUPP));
+		sbp_send_status(req);
+		sbp_free_request(req);
+		return;
+	case 3: /* Dummy ORB */
+		req->status.status |= cpu_to_be32(
+				STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+				STATUS_BLOCK_DEAD(0) |
+				STATUS_BLOCK_LEN(1) |
+				STATUS_BLOCK_SBP_STATUS(
+					SBP_STATUS_DUMMY_ORB_COMPLETE));
+		sbp_send_status(req);
+		sbp_free_request(req);
+		return;
+	default:
+		BUG();
+	}
+}
+
+/* used to double-check we haven't been issued an AGENT_RESET */
+static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
+{
+	bool active;
+
+	spin_lock_bh(&agent->lock);
+	active = (agent->state == AGENT_STATE_ACTIVE);
+	spin_unlock_bh(&agent->lock);
+
+	return active;
+}
+
+static void tgt_agent_fetch_work(struct work_struct *work)
+{
+	struct sbp_target_agent *agent =
+		container_of(work, struct sbp_target_agent, work);
+	struct sbp_session *sess = agent->login->sess;
+	struct sbp_target_request *req;
+	int ret;
+	bool doorbell = agent->doorbell;
+	u64 next_orb = agent->orb_pointer;
+
+	while (next_orb && tgt_agent_check_active(agent)) {
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req) {
+			spin_lock_bh(&agent->lock);
+			agent->state = AGENT_STATE_DEAD;
+			spin_unlock_bh(&agent->lock);
+			return;
+		}
+
+		req->login = agent->login;
+		req->orb_pointer = next_orb;
+
+		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
+					req->orb_pointer >> 32));
+		req->status.orb_low = cpu_to_be32(
+				req->orb_pointer & 0xfffffffc);
+
+		/* read in the ORB */
+		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
+				sess->node_id, sess->generation, sess->speed,
+				req->orb_pointer, &req->orb, sizeof(req->orb));
+		if (ret != RCODE_COMPLETE) {
+			pr_debug("tgt_orb fetch failed: %x\n", ret);
+			req->status.status |= cpu_to_be32(
+					STATUS_BLOCK_SRC(
+						STATUS_SRC_ORB_FINISHED) |
+					STATUS_BLOCK_RESP(
+						STATUS_RESP_TRANSPORT_FAILURE) |
+					STATUS_BLOCK_DEAD(1) |
+					STATUS_BLOCK_LEN(1) |
+					STATUS_BLOCK_SBP_STATUS(
+						SBP_STATUS_UNSPECIFIED_ERROR));
+			spin_lock_bh(&agent->lock);
+			agent->state = AGENT_STATE_DEAD;
+			spin_unlock_bh(&agent->lock);
+
+			sbp_send_status(req);
+			sbp_free_request(req);
+			return;
+		}
+
+		/* check the next_ORB field */
+		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
+			next_orb = 0;
+			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
+						STATUS_SRC_ORB_FINISHED));
+		} else {
+			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
+			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
+						STATUS_SRC_ORB_CONTINUING));
+		}
+
+		if (tgt_agent_check_active(agent) && !doorbell) {
+			INIT_WORK(&req->work, tgt_agent_process_work);
+			queue_work(system_unbound_wq, &req->work);
+		} else {
+			/* don't process this request, just check next_ORB */
+			sbp_free_request(req);
+		}
+
+		spin_lock_bh(&agent->lock);
+		doorbell = agent->doorbell = false;
+
+		/* check if we should carry on processing */
+		if (next_orb)
+			agent->orb_pointer = next_orb;
+		else
+			agent->state = AGENT_STATE_SUSPENDED;
+
+		spin_unlock_bh(&agent->lock);
+	}
+}
+
+static struct sbp_target_agent *sbp_target_agent_register(
+		struct sbp_login_descriptor *login)
+{
+	struct sbp_target_agent *agent;
+	int ret;
+
+	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
+	if (!agent)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&agent->lock);
+
+	agent->handler.length = 0x20;
+	agent->handler.address_callback = tgt_agent_rw;
+	agent->handler.callback_data = agent;
+
+	agent->login = login;
+	agent->state = AGENT_STATE_RESET;
+	INIT_WORK(&agent->work, tgt_agent_fetch_work);
+	agent->orb_pointer = 0;
+	agent->doorbell = false;
+
+	ret = fw_core_add_address_handler(&agent->handler,
+			&sbp_register_region);
+	if (ret < 0) {
+		kfree(agent);
+		return ERR_PTR(ret);
+	}
+
+	return agent;
+}
+
+static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
+{
+	fw_core_remove_address_handler(&agent->handler);
+	cancel_work_sync(&agent->work);
+	kfree(agent);
+}
+
+/*
+ * Simple wrapper around fw_run_transaction that retries the transaction
+ * several times in case of failure, with a quadratically increasing backoff.
+ */
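+/*
+ * With delay = 5 * attempt * attempt, the sleeps after failed attempts work
+ * out to roughly 5, 20, 45, 80 and 125 usecs (usleep_range() may stretch
+ * each by up to 2x) before the final attempt's rcode is returned.
+ */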
+static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
+		int generation, int speed, unsigned long long offset,
+		void *payload, size_t length)
+{
+	int attempt, ret, delay;
+
+	for (attempt = 1; attempt <= 5; attempt++) {
+		ret = fw_run_transaction(card, tcode, destination_id,
+				generation, speed, offset, payload, length);
+
+		switch (ret) {
+		case RCODE_COMPLETE:
+		case RCODE_TYPE_ERROR:
+		case RCODE_ADDRESS_ERROR:
+		case RCODE_GENERATION:
+			return ret;
+
+		default:
+			delay = 5 * attempt * attempt;
+			usleep_range(delay, delay * 2);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Wrapper around sbp_run_transaction that gets the card, destination,
+ * generation and speed out of the request's session.
+ */
+static int sbp_run_request_transaction(struct sbp_target_request *req,
+		int tcode, unsigned long long offset, void *payload,
+		size_t length)
+{
+	struct sbp_login_descriptor *login = req->login;
+	struct sbp_session *sess = login->sess;
+	struct fw_card *card;
+	int node_id, generation, speed, ret;
+
+	spin_lock_bh(&sess->lock);
+	card = fw_card_get(sess->card);
+	node_id = sess->node_id;
+	generation = sess->generation;
+	speed = sess->speed;
+	spin_unlock_bh(&sess->lock);
+
+	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
+			offset, payload, length);
+
+	fw_card_put(card);
+
+	return ret;
+}
+
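+/*
+ * Fetch the full CDB for this request: the leading bytes are embedded in the
+ * ORB's command_block field, and any remainder of a longer CDB is read from
+ * the initiator's memory immediately following the ORB.
+ */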
+static int sbp_fetch_command(struct sbp_target_request *req)
+{
+	int ret, cmd_len, copy_len;
+
+	cmd_len = scsi_command_size(req->orb.command_block);
+
+	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
+	if (!req->cmd_buf)
+		return -ENOMEM;
+
+	memcpy(req->cmd_buf, req->orb.command_block,
+		min_t(int, cmd_len, sizeof(req->orb.command_block)));
+
+	if (cmd_len > sizeof(req->orb.command_block)) {
+		pr_debug("sbp_fetch_command: filling in long command\n");
+		copy_len = cmd_len - sizeof(req->orb.command_block);
+
+		ret = sbp_run_request_transaction(req,
+				TCODE_READ_BLOCK_REQUEST,
+				req->orb_pointer + sizeof(req->orb),
+				req->cmd_buf + sizeof(req->orb.command_block),
+				copy_len);
+		if (ret != RCODE_COMPLETE)
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static int sbp_fetch_page_table(struct sbp_target_request *req)
+{
+	int pg_tbl_sz, ret;
+	struct sbp_page_table_entry *pg_tbl;
+
+	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
+		return 0;
+
+	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
+		sizeof(struct sbp_page_table_entry);
+
+	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
+	if (!pg_tbl)
+		return -ENOMEM;
+
+	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
+			sbp2_pointer_to_addr(&req->orb.data_descriptor),
+			pg_tbl, pg_tbl_sz);
+	if (ret != RCODE_COMPLETE) {
+		kfree(pg_tbl);
+		return -EIO;
+	}
+
+	req->pg_tbl = pg_tbl;
+	return 0;
+}
+
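+/*
+ * Derive the transfer length and DMA direction from the ORB: with a page
+ * table present, data_size counts page table entries and the total length is
+ * the sum of their segment lengths; otherwise data_size is the byte count of
+ * the single direct data buffer.
+ */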
+static void sbp_calc_data_length_direction(struct sbp_target_request *req,
+	u32 *data_len, enum dma_data_direction *data_dir)
+{
+	int data_size, direction, idx;
+
+	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
+	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
+
+	if (!data_size) {
+		*data_len = 0;
+		*data_dir = DMA_NONE;
+		return;
+	}
+
+	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	if (req->pg_tbl) {
+		*data_len = 0;
+		for (idx = 0; idx < data_size; idx++) {
+			*data_len += be16_to_cpu(
+					req->pg_tbl[idx].segment_length);
+		}
+	} else {
+		*data_len = data_size;
+	}
+}
+
+static void sbp_handle_command(struct sbp_target_request *req)
+{
+	struct sbp_login_descriptor *login = req->login;
+	struct sbp_session *sess = login->sess;
+	int ret, unpacked_lun;
+	u32 data_length;
+	enum dma_data_direction data_dir;
+
+	ret = sbp_fetch_command(req);
+	if (ret) {
+		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = sbp_fetch_page_table(req);
+	if (ret) {
+		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
+			ret);
+		goto err;
+	}
+
+	unpacked_lun = req->login->login_lun;
+	sbp_calc_data_length_direction(req, &data_length, &data_dir);
+
+	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
+			req->orb_pointer, unpacked_lun, data_length, data_dir);
+
+	/* only used for printk until we do TMRs */
+	req->se_cmd.tag = req->orb_pointer;
+	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+			      req->sense_buf, unpacked_lun, data_length,
+			      TCM_SIMPLE_TAG, data_dir, 0))
+		goto err;
+
+	return;
+
+err:
+	req->status.status |= cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+		STATUS_BLOCK_DEAD(0) |
+		STATUS_BLOCK_LEN(1) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+	sbp_send_status(req);
+	sbp_free_request(req);
+}
+
+/*
+ * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
+ * DMA_FROM_DEVICE = write to initiator (SCSI READ)
+ */
+static int sbp_rw_data(struct sbp_target_request *req)
+{
+	struct sbp_session *sess = req->login->sess;
+	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
+		generation, num_pte, length, tfr_length,
+		rcode = RCODE_COMPLETE;
+	struct sbp_page_table_entry *pte;
+	unsigned long long offset;
+	struct fw_card *card;
+	struct sg_mapping_iter iter;
+
+	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
+		tcode = TCODE_WRITE_BLOCK_REQUEST;
+		sg_miter_flags = SG_MITER_FROM_SG;
+	} else {
+		tcode = TCODE_READ_BLOCK_REQUEST;
+		sg_miter_flags = SG_MITER_TO_SG;
+	}
+
+	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
+	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
+
+	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
+	if (pg_size) {
+		pr_err("sbp_run_transaction: page size ignored\n");
+		pg_size = 0x100 << pg_size;
+	}
+
+	spin_lock_bh(&sess->lock);
+	card = fw_card_get(sess->card);
+	node_id = sess->node_id;
+	generation = sess->generation;
+	spin_unlock_bh(&sess->lock);
+
+	if (req->pg_tbl) {
+		pte = req->pg_tbl;
+		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
+
+		offset = 0;
+		length = 0;
+	} else {
+		pte = NULL;
+		num_pte = 0;
+
+		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
+		length = req->se_cmd.data_length;
+	}
+
+	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
+		sg_miter_flags);
+
+	while (length || num_pte) {
+		if (!length) {
+			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
+				be32_to_cpu(pte->segment_base_lo);
+			length = be16_to_cpu(pte->segment_length);
+
+			pte++;
+			num_pte--;
+		}
+
+		sg_miter_next(&iter);
+
+		tfr_length = min3(length, max_payload, (int)iter.length);
+
+		/* FIXME: take page_size into account */
+
+		rcode = sbp_run_transaction(card, tcode, node_id,
+				generation, speed,
+				offset, iter.addr, tfr_length);
+
+		if (rcode != RCODE_COMPLETE)
+			break;
+
+		length -= tfr_length;
+		offset += tfr_length;
+		iter.consumed = tfr_length;
+	}
+
+	sg_miter_stop(&iter);
+	fw_card_put(card);
+
+	if (rcode == RCODE_COMPLETE) {
+		WARN_ON(length != 0);
+		return 0;
+	} else {
+		return -EIO;
+	}
+}
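+
+/*
+ * Note on sizing: a max_payload code of n permits transfers of up to
+ * 2^(n+2) bytes, hence the 4 << n above; e.g. code 8 allows 1024-byte
+ * payloads, so a 4096-byte segment takes four transactions.
+ */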
+
+static int sbp_send_status(struct sbp_target_request *req)
+{
+	int ret, length;
+	struct sbp_login_descriptor *login = req->login;
+
+	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
+
+	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+			login->status_fifo_addr, &req->status, length);
+	if (ret != RCODE_COMPLETE) {
+		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
+		return -EIO;
+	}
+
+	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
+			req->orb_pointer);
+
+	return 0;
+}
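+
+/*
+ * The length computation inverts STATUS_BLOCK_LEN: bits 26..24 hold
+ * the block length in quadlets minus one.  E.g. STATUS_BLOCK_LEN(1)
+ * yields (1 + 1) * 4 = 8 bytes (just the status/orb_low header),
+ * while the sense path's STATUS_BLOCK_LEN(5) yields 24 bytes: the
+ * header plus 16 bytes of mangled sense data.
+ */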
+
+static void sbp_sense_mangle(struct sbp_target_request *req)
+{
+	struct se_cmd *se_cmd = &req->se_cmd;
+	u8 *sense = req->sense_buf;
+	u8 *status = req->status.data;
+
+	WARN_ON(se_cmd->scsi_sense_length < 18);
+
+	switch (sense[0] & 0x7f) { 		/* sfmt */
+	case 0x70: /* current, fixed */
+		status[0] = 0 << 6;
+		break;
+	case 0x71: /* deferred, fixed */
+		status[0] = 1 << 6;
+		break;
+	case 0x72: /* current, descriptor */
+	case 0x73: /* deferred, descriptor */
+	default:
+		/*
+		 * TODO: SBP-3 specifies what we should do with descriptor
+		 * format sense data
+		 */
+		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
+			sense[0]);
+		req->status.status |= cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_DEAD(0) |
+			STATUS_BLOCK_LEN(1) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
+		return;
+	}
+
+	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
+	status[1] =
+		(sense[0] & 0x80) |		/* valid */
+		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
+		(sense[2] & 0x0f);		/* sense_key */
+	status[2] = se_cmd->scsi_asc;		/* sense_code */
+	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */
+
+	/* information */
+	status[4] = sense[3];
+	status[5] = sense[4];
+	status[6] = sense[5];
+	status[7] = sense[6];
+
+	/* CDB-dependent */
+	status[8] = sense[8];
+	status[9] = sense[9];
+	status[10] = sense[10];
+	status[11] = sense[11];
+
+	/* fru */
+	status[12] = sense[14];
+
+	/* sense_key-dependent */
+	status[13] = sense[15];
+	status[14] = sense[16];
+	status[15] = sense[17];
+
+	req->status.status |= cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+		STATUS_BLOCK_DEAD(0) |
+		STATUS_BLOCK_LEN(5) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
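+
+/*
+ * Example of the mapping above: fixed-format sense for a MEDIUM ERROR
+ * (sense key 0x3, ASC 0x11, ASCQ 0x00) lands with 0x3 in the low
+ * nibble of status[1], 0x11 in status[2] and 0x00 in status[3].
+ */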
+
+static int sbp_send_sense(struct sbp_target_request *req)
+{
+	struct se_cmd *se_cmd = &req->se_cmd;
+
+	if (se_cmd->scsi_sense_length) {
+		sbp_sense_mangle(req);
+	} else {
+		req->status.status |= cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_DEAD(0) |
+			STATUS_BLOCK_LEN(1) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+	}
+
+	return sbp_send_status(req);
+}
+
+static void sbp_free_request(struct sbp_target_request *req)
+{
+	kfree(req->pg_tbl);
+	kfree(req->cmd_buf);
+	kfree(req);
+}
+
+static void sbp_mgt_agent_process(struct work_struct *work)
+{
+	struct sbp_management_agent *agent =
+		container_of(work, struct sbp_management_agent, work);
+	struct sbp_management_request *req = agent->request;
+	int ret;
+	int status_data_len = 0;
+
+	/* fetch the ORB from the initiator */
+	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
+		req->node_addr, req->generation, req->speed,
+		agent->orb_offset, &req->orb, sizeof(req->orb));
+	if (ret != RCODE_COMPLETE) {
+		pr_debug("mgt_orb fetch failed: %x\n", ret);
+		goto out;
+	}
+
+	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
+		sbp2_pointer_to_addr(&req->orb.ptr1),
+		sbp2_pointer_to_addr(&req->orb.ptr2),
+		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
+		sbp2_pointer_to_addr(&req->orb.status_fifo));
+
+	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
+		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
+		pr_err("mgt_orb bad request\n");
+		goto out;
+	}
+
+	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
+	case MANAGEMENT_ORB_FUNCTION_LOGIN:
+		sbp_management_request_login(agent, req, &status_data_len);
+		break;
+
+	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
+		sbp_management_request_query_logins(agent, req,
+				&status_data_len);
+		break;
+
+	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
+		sbp_management_request_reconnect(agent, req, &status_data_len);
+		break;
+
+	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
+		pr_notice("SET PASSWORD not implemented\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+		break;
+
+	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
+		sbp_management_request_logout(agent, req, &status_data_len);
+		break;
+
+	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
+		pr_notice("ABORT TASK not implemented\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+		break;
+
+	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
+		pr_notice("ABORT TASK SET not implemented\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+		break;
+
+	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
+		pr_notice("LOGICAL UNIT RESET not implemented\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+		break;
+
+	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
+		pr_notice("TARGET RESET not implemented\n");
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+		break;
+
+	default:
+		pr_notice("unknown management function 0x%x\n",
+			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
+
+		req->status.status = cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+		break;
+	}
+
+	req->status.status |= cpu_to_be32(
+		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
+		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
+		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
+	req->status.orb_low = cpu_to_be32(agent->orb_offset);
+
+	/* write the status block back to the initiator */
+	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
+		req->node_addr, req->generation, req->speed,
+		sbp2_pointer_to_addr(&req->orb.status_fifo),
+		&req->status, 8 + status_data_len);
+	if (ret != RCODE_COMPLETE) {
+		pr_debug("mgt_orb status write failed: %x\n", ret);
+		goto out;
+	}
+
+out:
+	fw_card_put(req->card);
+	kfree(req);
+
+	spin_lock_bh(&agent->lock);
+	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
+	spin_unlock_bh(&agent->lock);
+}
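+
+/*
+ * Status sizing example: a management function returning 16 bytes of
+ * status data gives STATUS_BLOCK_LEN(DIV_ROUND_UP(16, 4) + 1) =
+ * STATUS_BLOCK_LEN(5), and the FIFO write above covers 8 + 16 = 24
+ * bytes.
+ */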
+
+static void sbp_mgt_agent_rw(struct fw_card *card,
+	struct fw_request *request, int tcode, int destination, int source,
+	int generation, unsigned long long offset, void *data, size_t length,
+	void *callback_data)
+{
+	struct sbp_management_agent *agent = callback_data;
+	struct sbp2_pointer *ptr = data;
+	int rcode = RCODE_ADDRESS_ERROR;
+
+	if (!agent->tport->enable)
+		goto out;
+
+	if ((offset != agent->handler.offset) || (length != 8))
+		goto out;
+
+	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
+		struct sbp_management_request *req;
+		int prev_state;
+
+		spin_lock_bh(&agent->lock);
+		prev_state = agent->state;
+		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
+		spin_unlock_bh(&agent->lock);
+
+		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
+			pr_notice("ignoring management request while busy\n");
+			rcode = RCODE_CONFLICT_ERROR;
+			goto out;
+		}
+
+		req = kzalloc(sizeof(*req), GFP_ATOMIC);
+		if (!req) {
+			rcode = RCODE_CONFLICT_ERROR;
+			goto out;
+		}
+
+		req->card = fw_card_get(card);
+		req->generation = generation;
+		req->node_addr = source;
+		req->speed = fw_get_request_speed(request);
+
+		agent->orb_offset = sbp2_pointer_to_addr(ptr);
+		agent->request = req;
+
+		queue_work(system_unbound_wq, &agent->work);
+		rcode = RCODE_COMPLETE;
+	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
+		addr_to_sbp2_pointer(agent->orb_offset, ptr);
+		rcode = RCODE_COMPLETE;
+	} else {
+		rcode = RCODE_TYPE_ERROR;
+	}
+
+out:
+	fw_send_response(card, request, rcode);
+}
+
+static struct sbp_management_agent *sbp_management_agent_register(
+		struct sbp_tport *tport)
+{
+	int ret;
+	struct sbp_management_agent *agent;
+
+	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
+	if (!agent)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&agent->lock);
+	agent->tport = tport;
+	agent->handler.length = 0x08;
+	agent->handler.address_callback = sbp_mgt_agent_rw;
+	agent->handler.callback_data = agent;
+	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
+	INIT_WORK(&agent->work, sbp_mgt_agent_process);
+	agent->orb_offset = 0;
+	agent->request = NULL;
+
+	ret = fw_core_add_address_handler(&agent->handler,
+			&sbp_register_region);
+	if (ret < 0) {
+		kfree(agent);
+		return ERR_PTR(ret);
+	}
+
+	return agent;
+}
+
+static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
+{
+	fw_core_remove_address_handler(&agent->handler);
+	cancel_work_sync(&agent->work);
+	kfree(agent);
+}
+
+static int sbp_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int sbp_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static char *sbp_get_fabric_name(void)
+{
+	return "sbp";
+}
+
+static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+
+	return &tport->tport_name[0];
+}
+
+static u16 sbp_get_tag(struct se_portal_group *se_tpg)
+{
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	return tpg->tport_tpgt;
+}
+
+static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static void sbp_release_cmd(struct se_cmd *se_cmd)
+{
+	struct sbp_target_request *req = container_of(se_cmd,
+			struct sbp_target_request, se_cmd);
+
+	sbp_free_request(req);
+}
+
+static int sbp_shutdown_session(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void sbp_close_session(struct se_session *se_sess)
+{
+	return;
+}
+
+static u32 sbp_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static int sbp_write_pending(struct se_cmd *se_cmd)
+{
+	struct sbp_target_request *req = container_of(se_cmd,
+			struct sbp_target_request, se_cmd);
+	int ret;
+
+	ret = sbp_rw_data(req);
+	if (ret) {
+		req->status.status |= cpu_to_be32(
+			STATUS_BLOCK_RESP(
+				STATUS_RESP_TRANSPORT_FAILURE) |
+			STATUS_BLOCK_DEAD(0) |
+			STATUS_BLOCK_LEN(1) |
+			STATUS_BLOCK_SBP_STATUS(
+				SBP_STATUS_UNSPECIFIED_ERROR));
+		sbp_send_status(req);
+		return ret;
+	}
+
+	target_execute_cmd(se_cmd);
+	return 0;
+}
+
+static int sbp_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
+{
+	return;
+}
+
+static int sbp_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int sbp_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct sbp_target_request *req = container_of(se_cmd,
+			struct sbp_target_request, se_cmd);
+	int ret;
+
+	ret = sbp_rw_data(req);
+	if (ret) {
+		req->status.status |= cpu_to_be32(
+			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+			STATUS_BLOCK_DEAD(0) |
+			STATUS_BLOCK_LEN(1) |
+			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+		sbp_send_status(req);
+		return ret;
+	}
+
+	return sbp_send_sense(req);
+}
+
+/*
+ * Called after command (no data transfer) or after the write (to device)
+ * operation is completed
+ */
+static int sbp_queue_status(struct se_cmd *se_cmd)
+{
+	struct sbp_target_request *req = container_of(se_cmd,
+			struct sbp_target_request, se_cmd);
+
+	return sbp_send_sense(req);
+}
+
+static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+}
+
+static void sbp_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
+static int sbp_check_stop_free(struct se_cmd *se_cmd)
+{
+	struct sbp_target_request *req = container_of(se_cmd,
+			struct sbp_target_request, se_cmd);
+
+	transport_generic_free_cmd(&req->se_cmd, 0);
+	return 1;
+}
+
+static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
+{
+	struct se_lun *lun;
+	int count = 0;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
+		count++;
+	rcu_read_unlock();
+
+	return count;
+}
+
+static int sbp_update_unit_directory(struct sbp_tport *tport)
+{
+	struct se_lun *lun;
+	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
+	u32 *data;
+
+	if (tport->unit_directory.data) {
+		fw_core_remove_descriptor(&tport->unit_directory);
+		kfree(tport->unit_directory.data);
+		tport->unit_directory.data = NULL;
+	}
+
+	if (!tport->enable || !tport->tpg)
+		return 0;
+
+	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
+
+	/*
+	 * Number of entries in the final unit directory:
+	 *  - all of those in the template
+	 *  - management_agent
+	 *  - unit_characteristics
+	 *  - reconnect_timeout
+	 *  - unit unique ID
+	 *  - one for each LUN
+	 *
+	 *  MUST NOT include leaf or sub-directory entries
+	 */
+	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
+
+	if (tport->directory_id != -1)
+		num_entries++;
+
+	/* allocate num_entries + 4 for the header and unique ID leaf */
+	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* directory_length */
+	data[idx++] = num_entries << 16;
+
+	/* directory_id */
+	if (tport->directory_id != -1)
+		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
+
+	/* unit directory template */
+	memcpy(&data[idx], sbp_unit_directory_template,
+			sizeof(sbp_unit_directory_template));
+	idx += ARRAY_SIZE(sbp_unit_directory_template);
+
+	/* management_agent */
+	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
+	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
+
+	/* unit_characteristics */
+	data[idx++] = 0x3a000000 |
+		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
+		SBP_ORB_FETCH_SIZE;
+
+	/* reconnect_timeout */
+	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
+
+	/* unit unique ID (leaf is just after LUNs) */
+	data[idx++] = 0x8d000000 | (num_luns + 1);
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
+		struct se_device *dev;
+		int type;
+		/*
+		 * rcu_dereference_raw protected by se_lun->lun_group symlink
+		 * reference to se_device->dev_group.
+		 */
+		dev = rcu_dereference_raw(lun->lun_se_dev);
+		type = dev->transport->get_device_type(dev);
+
+		/* logical_unit_number */
+		data[idx++] = 0x14000000 |
+			((type << 16) & 0x1f0000) |
+			(lun->unpacked_lun & 0xffff);
+	}
+	rcu_read_unlock();
+
+	/* unit unique ID leaf */
+	data[idx++] = 2 << 16;
+	data[idx++] = tport->guid >> 32;
+	data[idx++] = tport->guid;
+
+	tport->unit_directory.length = idx;
+	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
+	tport->unit_directory.data = data;
+
+	ret = fw_core_add_descriptor(&tport->unit_directory);
+	if (ret < 0) {
+		kfree(tport->unit_directory.data);
+		tport->unit_directory.data = NULL;
+	}
+
+	return ret;
+}
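+
+/*
+ * Each entry written above is a single config-ROM quadlet with the
+ * key in the top byte and the value in the low 24 bits: e.g. a
+ * direct-access device (type 0) at LUN 1 produces the
+ * logical_unit_number entry 0x14000001.
+ */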
+
+static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
+{
+	const char *cp;
+	char c, nibble;
+	int pos = 0, err;
+
+	*wwn = 0;
+	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
+		c = *cp;
+		if (c == '\n' && cp[1] == '\0')
+			continue;
+		if (c == '\0') {
+			err = 2;
+			if (pos != 16)
+				goto fail;
+			return cp - name;
+		}
+		err = 3;
+		if (isdigit(c))
+			nibble = c - '0';
+		else if (isxdigit(c))
+			nibble = tolower(c) - 'a' + 10;
+		else
+			goto fail;
+		*wwn = (*wwn << 4) | nibble;
+		pos++;
+	}
+	err = 4;
+fail:
+	printk(KERN_INFO "err %u len %zu pos %u\n",
+			err, cp - name, pos);
+	return -1;
+}
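+
+/*
+ * The parser accepts exactly 16 hex digits, optionally followed by a
+ * newline: e.g. "0001020304050607\n" yields the EUI-64
+ * 0x0001020304050607, while shorter, longer or non-hex input fails
+ * with -1.
+ */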
+
+static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
+{
+	return snprintf(buf, len, "%016llx", wwn);
+}
+
+static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
+{
+	u64 guid = 0;
+
+	if (sbp_parse_wwn(name, &guid) < 0)
+		return -EINVAL;
+	return 0;
+}
+
+static int sbp_post_link_lun(
+		struct se_portal_group *se_tpg,
+		struct se_lun *se_lun)
+{
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+
+	return sbp_update_unit_directory(tpg->tport);
+}
+
+static void sbp_pre_unlink_lun(
+		struct se_portal_group *se_tpg,
+		struct se_lun *se_lun)
+{
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	int ret;
+
+	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
+		tport->enable = 0;
+
+	ret = sbp_update_unit_directory(tport);
+	if (ret < 0)
+		pr_err("unlink LUN: failed to update unit directory\n");
+}
+
+static struct se_portal_group *sbp_make_tpg(
+		struct se_wwn *wwn,
+		struct config_group *group,
+		const char *name)
+{
+	struct sbp_tport *tport =
+		container_of(wwn, struct sbp_tport, tport_wwn);
+
+	struct sbp_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	if (tport->tpg) {
+		pr_err("Only one TPG per Unit is possible.\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct sbp_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	tpg->tport = tport;
+	tpg->tport_tpgt = tpgt;
+	tport->tpg = tpg;
+
+	/* default attribute values */
+	tport->enable = 0;
+	tport->directory_id = -1;
+	tport->mgt_orb_timeout = 15;
+	tport->max_reconnect_timeout = 5;
+	tport->max_logins_per_lun = 1;
+
+	tport->mgt_agt = sbp_management_agent_register(tport);
+	if (IS_ERR(tport->mgt_agt)) {
+		ret = PTR_ERR(tport->mgt_agt);
+		goto out_free_tpg;
+	}
+
+	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
+	if (ret < 0)
+		goto out_unreg_mgt_agt;
+
+	return &tpg->se_tpg;
+
+out_unreg_mgt_agt:
+	sbp_management_agent_unregister(tport->mgt_agt);
+out_free_tpg:
+	tport->tpg = NULL;
+	kfree(tpg);
+	return ERR_PTR(ret);
+}
+
+static void sbp_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+
+	core_tpg_deregister(se_tpg);
+	sbp_management_agent_unregister(tport->mgt_agt);
+	tport->tpg = NULL;
+	kfree(tpg);
+}
+
+static struct se_wwn *sbp_make_tport(
+		struct target_fabric_configfs *tf,
+		struct config_group *group,
+		const char *name)
+{
+	struct sbp_tport *tport;
+	u64 guid = 0;
+
+	if (sbp_parse_wwn(name, &guid) < 0)
+		return ERR_PTR(-EINVAL);
+
+	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
+	if (!tport) {
+		pr_err("Unable to allocate struct sbp_tport\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	tport->guid = guid;
+	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
+
+	return &tport->tport_wwn;
+}
+
+static void sbp_drop_tport(struct se_wwn *wwn)
+{
+	struct sbp_tport *tport =
+		container_of(wwn, struct sbp_tport, tport_wwn);
+
+	kfree(tport);
+}
+
+static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
+}
+
+CONFIGFS_ATTR_RO(sbp_wwn_, version);
+
+static struct configfs_attribute *sbp_wwn_attrs[] = {
+	&sbp_wwn_attr_version,
+	NULL,
+};
+
+static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+
+	if (tport->directory_id == -1)
+		return sprintf(page, "implicit\n");
+	else
+		return sprintf(page, "%06x\n", tport->directory_id);
+}
+
+static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	unsigned long val;
+
+	if (tport->enable) {
+		pr_err("Cannot change the directory_id on an active target.\n");
+		return -EBUSY;
+	}
+
+	if (strstr(page, "implicit") == page) {
+		tport->directory_id = -1;
+	} else {
+		if (kstrtoul(page, 16, &val) < 0)
+			return -EINVAL;
+		if (val > 0xffffff)
+			return -EINVAL;
+
+		tport->directory_id = val;
+	}
+
+	return count;
+}
+
+static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	return sprintf(page, "%d\n", tport->enable);
+}
+
+static ssize_t sbp_tpg_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(page, 0, &val) < 0)
+		return -EINVAL;
+	if ((val != 0) && (val != 1))
+		return -EINVAL;
+
+	if (tport->enable == val)
+		return count;
+
+	if (val) {
+		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
+			pr_err("Cannot enable a target with no LUNs!\n");
+			return -EINVAL;
+		}
+	} else {
+		/* XXX: force-shutdown sessions instead? */
+		spin_lock_bh(&se_tpg->session_lock);
+		if (!list_empty(&se_tpg->tpg_sess_list)) {
+			spin_unlock_bh(&se_tpg->session_lock);
+			return -EBUSY;
+		}
+		spin_unlock_bh(&se_tpg->session_lock);
+	}
+
+	tport->enable = val;
+
+	ret = sbp_update_unit_directory(tport);
+	if (ret < 0) {
+		pr_err("Could not update Config ROM\n");
+		return ret;
+	}
+
+	return count;
+}
+
+CONFIGFS_ATTR(sbp_tpg_, directory_id);
+CONFIGFS_ATTR(sbp_tpg_, enable);
+
+static struct configfs_attribute *sbp_tpg_base_attrs[] = {
+	&sbp_tpg_attr_directory_id,
+	&sbp_tpg_attr_enable,
+	NULL,
+};
+
+static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
+		char *page)
+{
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
+}
+
+static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(page, 0, &val) < 0)
+		return -EINVAL;
+	if ((val < 1) || (val > 127))
+		return -EINVAL;
+
+	if (tport->mgt_orb_timeout == val)
+		return count;
+
+	tport->mgt_orb_timeout = val;
+
+	ret = sbp_update_unit_directory(tport);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
+		char *page)
+{
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
+}
+
+static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(page, 0, &val) < 0)
+		return -EINVAL;
+	if ((val < 1) || (val > 32767))
+		return -EINVAL;
+
+	if (tport->max_reconnect_timeout == val)
+		return count;
+
+	tport->max_reconnect_timeout = val;
+
+	ret = sbp_update_unit_directory(tport);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
+		char *page)
+{
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	return sprintf(page, "%d\n", tport->max_logins_per_lun);
+}
+
+static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);
+	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+	struct sbp_tport *tport = tpg->tport;
+	unsigned long val;
+
+	if (kstrtoul(page, 0, &val) < 0)
+		return -EINVAL;
+	if ((val < 1) || (val > 127))
+		return -EINVAL;
+
+	/* XXX: also check against current count? */
+
+	tport->max_logins_per_lun = val;
+
+	return count;
+}
+
+CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
+CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
+CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
+
+static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
+	&sbp_tpg_attrib_attr_mgt_orb_timeout,
+	&sbp_tpg_attrib_attr_max_reconnect_timeout,
+	&sbp_tpg_attrib_attr_max_logins_per_lun,
+	NULL,
+};
+
+static const struct target_core_fabric_ops sbp_ops = {
+	.module				= THIS_MODULE,
+	.name				= "sbp",
+	.get_fabric_name		= sbp_get_fabric_name,
+	.tpg_get_wwn			= sbp_get_fabric_wwn,
+	.tpg_get_tag			= sbp_get_tag,
+	.tpg_check_demo_mode		= sbp_check_true,
+	.tpg_check_demo_mode_cache	= sbp_check_true,
+	.tpg_check_demo_mode_write_protect = sbp_check_false,
+	.tpg_check_prod_mode_write_protect = sbp_check_false,
+	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
+	.release_cmd			= sbp_release_cmd,
+	.shutdown_session		= sbp_shutdown_session,
+	.close_session			= sbp_close_session,
+	.sess_get_index			= sbp_sess_get_index,
+	.write_pending			= sbp_write_pending,
+	.write_pending_status		= sbp_write_pending_status,
+	.set_default_node_attributes	= sbp_set_default_node_attrs,
+	.get_cmd_state			= sbp_get_cmd_state,
+	.queue_data_in			= sbp_queue_data_in,
+	.queue_status			= sbp_queue_status,
+	.queue_tm_rsp			= sbp_queue_tm_rsp,
+	.aborted_task			= sbp_aborted_task,
+	.check_stop_free		= sbp_check_stop_free,
+
+	.fabric_make_wwn		= sbp_make_tport,
+	.fabric_drop_wwn		= sbp_drop_tport,
+	.fabric_make_tpg		= sbp_make_tpg,
+	.fabric_drop_tpg		= sbp_drop_tpg,
+	.fabric_post_link		= sbp_post_link_lun,
+	.fabric_pre_unlink		= sbp_pre_unlink_lun,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_init_nodeacl		= sbp_init_nodeacl,
+
+	.tfc_wwn_attrs			= sbp_wwn_attrs,
+	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
+	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
+};
+
+static int __init sbp_init(void)
+{
+	return target_register_template(&sbp_ops);
+}
+
+static void __exit sbp_exit(void)
+{
+	target_unregister_template(&sbp_ops);
+}
+
+MODULE_DESCRIPTION("FireWire SBP fabric driver");
+MODULE_LICENSE("GPL");
+module_init(sbp_init);
+module_exit(sbp_exit);
diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h
new file mode 100644
index 0000000..73bcb12
--- /dev/null
+++ b/drivers/target/sbp/sbp_target.h
@@ -0,0 +1,242 @@
+#ifndef _SBP_BASE_H
+#define _SBP_BASE_H
+
+#include <linux/firewire.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <target/target_core_base.h>
+
+#define SBP_VERSION  "v0.1"
+#define SBP_NAMELEN 32
+
+#define SBP_ORB_FETCH_SIZE	8
+
+#define MANAGEMENT_AGENT_STATE_IDLE	0
+#define MANAGEMENT_AGENT_STATE_BUSY	1
+
+#define ORB_NOTIFY(v)			(((v) >> 31) & 0x01)
+#define ORB_REQUEST_FORMAT(v)		(((v) >> 29) & 0x03)
+
+#define MANAGEMENT_ORB_FUNCTION(v)	(((v) >> 16) & 0x0f)
+
+#define MANAGEMENT_ORB_FUNCTION_LOGIN			0x0
+#define MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS		0x1
+#define MANAGEMENT_ORB_FUNCTION_RECONNECT		0x3
+#define MANAGEMENT_ORB_FUNCTION_SET_PASSWORD		0x4
+#define MANAGEMENT_ORB_FUNCTION_LOGOUT			0x7
+#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK		0xb
+#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET		0xc
+#define MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET	0xe
+#define MANAGEMENT_ORB_FUNCTION_TARGET_RESET		0xf
+
+#define LOGIN_ORB_EXCLUSIVE(v)		(((v) >> 28) &   0x01)
+#define LOGIN_ORB_RESERVED(v)		(((v) >> 24) &   0x0f)
+#define LOGIN_ORB_RECONNECT(v)		(((v) >> 20) &   0x0f)
+#define LOGIN_ORB_LUN(v)		(((v) >>  0) & 0xffff)
+#define LOGIN_ORB_PASSWORD_LENGTH(v)	(((v) >> 16) & 0xffff)
+#define LOGIN_ORB_RESPONSE_LENGTH(v)	(((v) >>  0) & 0xffff)
+
+#define RECONNECT_ORB_LOGIN_ID(v)	(((v) >>  0) & 0xffff)
+#define LOGOUT_ORB_LOGIN_ID(v)		(((v) >>  0) & 0xffff)
+
+#define CMDBLK_ORB_DIRECTION(v)		(((v) >> 27) &   0x01)
+#define CMDBLK_ORB_SPEED(v)		(((v) >> 24) &   0x07)
+#define CMDBLK_ORB_MAX_PAYLOAD(v)	(((v) >> 20) &   0x0f)
+#define CMDBLK_ORB_PG_TBL_PRESENT(v)	(((v) >> 19) &   0x01)
+#define CMDBLK_ORB_PG_SIZE(v)		(((v) >> 16) &   0x07)
+#define CMDBLK_ORB_DATA_SIZE(v)		(((v) >>  0) & 0xffff)
+
+#define STATUS_BLOCK_SRC(v)		(((v) &   0x03) << 30)
+#define STATUS_BLOCK_RESP(v)		(((v) &   0x03) << 28)
+#define STATUS_BLOCK_DEAD(v)		(((v) ? 1 : 0)  << 27)
+#define STATUS_BLOCK_LEN(v)		(((v) &   0x07) << 24)
+#define STATUS_BLOCK_SBP_STATUS(v)	(((v) &   0xff) << 16)
+#define STATUS_BLOCK_ORB_OFFSET_HIGH(v)	(((v) & 0xffff) <<  0)
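+
+/*
+ * These build a host-order status word prior to cpu_to_be32(); e.g.
+ * STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ * STATUS_BLOCK_LEN(1) | STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)
+ * evaluates to 0x01000000, an 8-byte "request complete, OK" status.
+ */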
+
+#define STATUS_SRC_ORB_CONTINUING	0
+#define STATUS_SRC_ORB_FINISHED		1
+#define STATUS_SRC_UNSOLICITED		2
+
+#define STATUS_RESP_REQUEST_COMPLETE	0
+#define STATUS_RESP_TRANSPORT_FAILURE	1
+#define STATUS_RESP_ILLEGAL_REQUEST	2
+#define STATUS_RESP_VENDOR_DEPENDENT	3
+
+#define SBP_STATUS_OK			0
+#define SBP_STATUS_REQ_TYPE_NOTSUPP	1
+#define SBP_STATUS_SPEED_NOTSUPP	2
+#define SBP_STATUS_PAGE_SIZE_NOTSUPP	3
+#define SBP_STATUS_ACCESS_DENIED	4
+#define SBP_STATUS_LUN_NOTSUPP		5
+#define SBP_STATUS_PAYLOAD_TOO_SMALL	6
+/* 7 is reserved */
+#define SBP_STATUS_RESOURCES_UNAVAIL	8
+#define SBP_STATUS_FUNCTION_REJECTED	9
+#define SBP_STATUS_LOGIN_ID_UNKNOWN	10
+#define SBP_STATUS_DUMMY_ORB_COMPLETE	11
+#define SBP_STATUS_REQUEST_ABORTED	12
+#define SBP_STATUS_UNSPECIFIED_ERROR	0xff
+
+#define AGENT_STATE_RESET	0
+#define AGENT_STATE_ACTIVE	1
+#define AGENT_STATE_SUSPENDED	2
+#define AGENT_STATE_DEAD	3
+
+struct sbp2_pointer {
+	__be32 high;
+	__be32 low;
+};
+
+struct sbp_command_block_orb {
+	struct sbp2_pointer next_orb;
+	struct sbp2_pointer data_descriptor;
+	__be32 misc;
+	u8 command_block[12];
+};
+
+struct sbp_page_table_entry {
+	__be16 segment_length;
+	__be16 segment_base_hi;
+	__be32 segment_base_lo;
+};
+
+struct sbp_management_orb {
+	struct sbp2_pointer ptr1;
+	struct sbp2_pointer ptr2;
+	__be32 misc;
+	__be32 length;
+	struct sbp2_pointer status_fifo;
+};
+
+struct sbp_status_block {
+	__be32 status;
+	__be32 orb_low;
+	u8 data[24];
+};
+
+struct sbp_login_response_block {
+	__be32 misc;
+	struct sbp2_pointer command_block_agent;
+	__be32 reconnect_hold;
+};
+
+struct sbp_login_descriptor {
+	struct sbp_session *sess;
+	struct list_head link;
+
+	u32 login_lun;
+
+	u64 status_fifo_addr;
+	int exclusive;
+	u16 login_id;
+
+	struct sbp_target_agent *tgt_agt;
+};
+
+struct sbp_session {
+	spinlock_t lock;
+	struct se_session *se_sess;
+	struct list_head login_list;
+	struct delayed_work maint_work;
+
+	u64 guid; /* login_owner_EUI_64 */
+	int node_id; /* login_owner_ID */
+
+	struct fw_card *card;
+	int generation;
+	int speed;
+
+	int reconnect_hold;
+	u64 reconnect_expires;
+};
+
+struct sbp_tpg {
+	/* Target portal group tag for TCM */
+	u16 tport_tpgt;
+	/* Pointer back to sbp_tport */
+	struct sbp_tport *tport;
+	/* Returned by sbp_make_tpg() */
+	struct se_portal_group se_tpg;
+};
+
+struct sbp_tport {
+	/* Target Unit Identifier (EUI-64) */
+	u64 guid;
+	/* Target port name */
+	char tport_name[SBP_NAMELEN];
+	/* Returned by sbp_make_tport() */
+	struct se_wwn tport_wwn;
+
+	struct sbp_tpg *tpg;
+
+	/* FireWire unit directory */
+	struct fw_descriptor unit_directory;
+
+	/* SBP Management Agent */
+	struct sbp_management_agent *mgt_agt;
+
+	/* Parameters */
+	int enable;
+	s32 directory_id;
+	int mgt_orb_timeout;
+	int max_reconnect_timeout;
+	int max_logins_per_lun;
+};
+
+static inline u64 sbp2_pointer_to_addr(const struct sbp2_pointer *ptr)
+{
+	return (u64)(be32_to_cpu(ptr->high) & 0x0000ffff) << 32 |
+		(be32_to_cpu(ptr->low) & 0xfffffffc);
+}
+
+static inline void addr_to_sbp2_pointer(u64 addr, struct sbp2_pointer *ptr)
+{
+	ptr->high = cpu_to_be32(addr >> 32);
+	ptr->low = cpu_to_be32(addr);
+}
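+
+/*
+ * An SBP-2 pointer carries a 48-bit node address: the low 16 bits of
+ * 'high' plus all of 'low', with the bottom two bits masked off for
+ * quadlet alignment.  E.g. address 0xfffff0000000 packs as
+ * high = 0x0000ffff, low = 0xf0000000.
+ */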
+
+struct sbp_target_agent {
+	spinlock_t lock;
+	struct fw_address_handler handler;
+	struct sbp_login_descriptor *login;
+	int state;
+	struct work_struct work;
+	u64 orb_pointer;
+	bool doorbell;
+};
+
+struct sbp_target_request {
+	struct sbp_login_descriptor *login;
+	u64 orb_pointer;
+	struct sbp_command_block_orb orb;
+	struct sbp_status_block status;
+	struct work_struct work;
+
+	struct se_cmd se_cmd;
+	struct sbp_page_table_entry *pg_tbl;
+	void *cmd_buf;
+
+	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
+};
+
+struct sbp_management_agent {
+	spinlock_t lock;
+	struct sbp_tport *tport;
+	struct fw_address_handler handler;
+	int state;
+	struct work_struct work;
+	u64 orb_offset;
+	struct sbp_management_request *request;
+};
+
+struct sbp_management_request {
+	struct sbp_management_orb orb;
+	struct sbp_status_block status;
+	struct fw_card *card;
+	int generation;
+	int node_addr;
+	int speed;
+};
+
+#endif
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 0000000..49aba4a
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,2339 @@
+/*******************************************************************************
+ * Filename:  target_core_alua.c
+ *
+ * This file contains SPC-3 compliant asymmetric logical unit assignment
+ * (ALUA) emulation.
+ *
+ * (c) Copyright 2009-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <scsi/scsi_proto.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_ua.h"
+
+static sense_reason_t core_alua_check_transition(int state, int valid,
+						 int *primary);
+static int core_alua_set_tg_pt_secondary_state(
+		struct se_lun *lun, int explicit, int offline);
+
+static char *core_alua_dump_state(int state);
+
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+		struct t10_alua_tg_pt_gp *tg_pt_gp);
+
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
+/*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *map_mem;
+	unsigned char *buf;
+	u32 rd_len = 0, off;
+
+	if (cmd->data_length < 4) {
+		pr_warn("REPORT REFERRALS allocation length %u too"
+			" small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	off = 4;
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (list_empty(&dev->t10_alua.lba_map_list)) {
+		spin_unlock(&dev->t10_alua.lba_map_lock);
+		transport_kunmap_data_sg(cmd);
+
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+			    lba_map_list) {
+		int desc_num = off + 3;
+		int pg_num;
+
+		off += 4;
+		if (cmd->data_length > off)
+			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+		off += 8;
+		if (cmd->data_length > off)
+			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+		off += 8;
+		rd_len += 20;
+		pg_num = 0;
+		list_for_each_entry(map_mem, &map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			int alua_state = map_mem->lba_map_mem_alua_state;
+			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
+
+			if (cmd->data_length > off)
+				buf[off] = alua_state & 0x0f;
+			off += 2;
+			if (cmd->data_length > off)
+				buf[off] = (alua_pg_id >> 8) & 0xff;
+			off++;
+			if (cmd->data_length > off)
+				buf[off] = (alua_pg_id & 0xff);
+			off++;
+			rd_len += 4;
+			pg_num++;
+		}
+		if (cmd->data_length > desc_num)
+			buf[desc_num] = pg_num;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+
+	/*
+	 * Set the RETURN DATA LENGTH in the header of the DataIN payload.
+	 */
+	put_unaligned_be16(rd_len, &buf[2]);
+
+	transport_kunmap_data_sg(cmd);
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+/*
+ * REPORT_TARGET_PORT_GROUPS
+ *
+ * See spc4r17 section 6.27
+ */
+sense_reason_t
+target_emulate_report_target_port_groups(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_lun *lun;
+	unsigned char *buf;
+	u32 rd_len = 0, off;
+	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
+
+	/*
+	 * Skip over RESERVED area to first Target port group descriptor
+	 * depending on the PARAMETER DATA FORMAT type.
+	 */
+	if (ext_hdr != 0)
+		off = 8;
+	else
+		off = 4;
+
+	if (cmd->data_length < off) {
+		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
+			" small for %s header\n", cmd->data_length,
+			(ext_hdr) ? "extended" : "normal");
+		return TCM_INVALID_CDB_FIELD;
+	}
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+			tg_pt_gp_list) {
+		/*
+		 * Check if the Target port group and Target port descriptor list
+		 * based on tg_pt_gp_members count will fit into the response payload.
+		 * Otherwise, bump rd_len to let the initiator know we have exceeded
+		 * the allocation length and the response is truncated.
+		 */
+		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
+		     cmd->data_length) {
+			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
+			continue;
+		}
+		/*
+		 * PREF: Preferred target port bit, determine if this
+		 * bit should be set for port group.
+		 */
+		if (tg_pt_gp->tg_pt_gp_pref)
+			buf[off] = 0x80;
+		/*
+		 * Set the ASYMMETRIC ACCESS State
+		 */
+		buf[off++] |= (atomic_read(
+			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+		/*
+		 * Set supported ASYMMETRIC ACCESS State bits
+		 */
+		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
+		/*
+		 * TARGET PORT GROUP
+		 */
+		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+
+		off++; /* Skip over Reserved */
+		/*
+		 * STATUS CODE
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
+		/*
+		 * Vendor Specific field
+		 */
+		buf[off++] = 0x00;
+		/*
+		 * TARGET PORT COUNT
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
+		rd_len += 8;
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+				lun_tg_pt_gp_link) {
+			/*
+			 * Start Target Port descriptor format
+			 *
+			 * See spc4r17 section 6.2.7 Table 247
+			 */
+			off += 2; /* Skip over Obsolete */
+			/*
+			 * Set RELATIVE TARGET PORT IDENTIFIER
+			 */
+			buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
+			buf[off++] = (lun->lun_rtpi & 0xff);
+			rd_len += 4;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+	/*
+	 * Set the RETURN DATA LENGTH in the header of the DataIN payload.
+	 */
+	put_unaligned_be32(rd_len, &buf[0]);
+
+	/*
+	 * Fill in the Extended header parameter data format if requested
+	 */
+	if (ext_hdr != 0) {
+		buf[4] = 0x10;
+		/*
+		 * Set the implicit transition time (in seconds) for the application
+		 * client to use as a base for its transition timeout value.
+		 *
+		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
+		 * this CDB was received upon to determine this value individually
+		 * for the ALUA target port group.
+		 */
+		spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
+		tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
+		if (tg_pt_gp)
+			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
+		spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
+	}
+	transport_kunmap_data_sg(cmd);
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
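+
+/*
+ * Descriptor sizing: each target port group contributes 8 bytes plus
+ * 4 per member port, so a two-port group adds 16 bytes to rd_len,
+ * which is also the amount credited when a descriptor no longer fits
+ * within the allocation length.
+ */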
+
+/*
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
+ *
+ * See spc4r17 section 6.35
+ */
+sense_reason_t
+target_emulate_set_target_port_groups(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_lun *l_lun = cmd->se_lun;
+	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
+	unsigned char *buf;
+	unsigned char *ptr;
+	sense_reason_t rc = TCM_NO_SENSE;
+	u32 len = 4; /* Skip over RESERVED area in header */
+	int alua_access_state, primary = 0, valid_states;
+	u16 tg_pt_id, rtpi;
+
+	if (cmd->data_length < 4) {
+		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
+			" small\n", cmd->data_length);
+		return TCM_INVALID_PARAMETER_LIST;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	/*
+	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
+	 * for the local tg_pt_gp.
+	 */
+	spin_lock(&l_lun->lun_tg_pt_gp_lock);
+	l_tg_pt_gp = l_lun->lun_tg_pt_gp;
+	if (!l_tg_pt_gp) {
+		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+		pr_err("Unable to access l_lun->tg_pt_gp\n");
+		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
+		goto out;
+	}
+
+	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
+		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
+				" while TPGS_EXPLICIT_ALUA is disabled\n");
+		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
+		goto out;
+	}
+	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+	spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+
+	ptr = &buf[4]; /* Skip over RESERVED area in header */
+
+	while (len < cmd->data_length) {
+		bool found = false;
+		alua_access_state = (ptr[0] & 0x0f);
+		/*
+		 * Check the received ALUA access state, and determine if
+		 * the state is a primary or secondary target port asymmetric
+		 * access state.
+		 */
+		rc = core_alua_check_transition(alua_access_state,
+						valid_states, &primary);
+		if (rc) {
+			/*
+			 * If the SET TARGET PORT GROUPS attempts to establish
+			 * an invalid combination of target port asymmetric
+			 * access states or attempts to establish an
+			 * unsupported target port asymmetric access state,
+			 * then the command shall be terminated with CHECK
+			 * CONDITION status, with the sense key set to ILLEGAL
+			 * REQUEST, and the additional sense code set to INVALID
+			 * FIELD IN PARAMETER LIST.
+			 */
+			goto out;
+		}
+
+		/*
+		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
+		 * specifies a primary target port asymmetric access state,
+		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
+		 * a primary target port group for which the primary target
+		 * port asymmetric access state shall be changed. If the
+		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
+		 * port asymmetric access state, then the TARGET PORT GROUP OR
+		 * TARGET PORT field specifies the relative target port
+		 * identifier (see 3.1.120) of the target port for which the
+		 * secondary target port asymmetric access state shall be
+		 * changed.
+		 */
+		if (primary) {
+			tg_pt_id = get_unaligned_be16(ptr + 2);
+			/*
+			 * Locate the matching target port group ID from
+			 * the global tg_pt_gp list
+			 */
+			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+			list_for_each_entry(tg_pt_gp,
+					&dev->t10_alua.tg_pt_gps_list,
+					tg_pt_gp_list) {
+				if (!tg_pt_gp->tg_pt_gp_valid_id)
+					continue;
+
+				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
+					continue;
+
+				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
+
+				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+				if (!core_alua_do_port_transition(tg_pt_gp,
+						dev, l_lun, nacl,
+						alua_access_state, 1))
+					found = true;
+
+				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				break;
+			}
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+		} else {
+			struct se_lun *lun;
+
+			/*
+			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
+			 * the Target Port in question for the incoming
+			 * SET_TARGET_PORT_GROUPS op.
+			 */
+			rtpi = get_unaligned_be16(ptr + 2);
+			/*
+			 * Locate the matching relative target port identifier
+			 * for the struct se_device storage object.
+			 */
+			spin_lock(&dev->se_port_lock);
+			list_for_each_entry(lun, &dev->dev_sep_list,
+							lun_dev_link) {
+				if (lun->lun_rtpi != rtpi)
+					continue;
+
+				// XXX: racy unlock
+				spin_unlock(&dev->se_port_lock);
+
+				if (!core_alua_set_tg_pt_secondary_state(
+						lun, 1, 1))
+					found = true;
+
+				spin_lock(&dev->se_port_lock);
+				break;
+			}
+			spin_unlock(&dev->se_port_lock);
+		}
+
+		if (!found) {
+			rc = TCM_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+
+		ptr += 4;
+		len += 4;
+	}
+
+out:
+	transport_kunmap_data_sg(cmd);
+	if (!rc)
+		target_complete_cmd(cmd, GOOD);
+	return rc;
+}
+
+static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
+{
+	/*
+	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+	 * The ALUA additional sense code qualifier (ASCQ) is determined
+	 * by the ALUA primary or secondary access state..
+	 */
+	pr_debug("[%s]: ALUA TG Port not available, "
+		"SenseKey: NOT_READY, ASC/ASCQ: "
+		"0x04/0x%02x\n",
+		cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+	cmd->scsi_asc = 0x04;
+	cmd->scsi_ascq = alua_ascq;
+}
+
+static inline void core_alua_state_nonoptimized(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	int nonop_delay_msecs)
+{
+	/*
+	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
+	 * later to determine if processing of this cmd needs to be
+	 * temporarily delayed for the Active/NonOptimized primary access state.
+	 */
+	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
+	cmd->alua_nonop_delay = nonop_delay_msecs;
+}
+
+static inline int core_alua_state_lba_dependent(
+	struct se_cmd *cmd,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_device *dev = cmd->se_dev;
+	u64 segment_size, segment_mult, sectors, lba;
+
+	/* Only need to check for cdb actually containing LBAs */
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+		return 0;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	segment_size = dev->t10_alua.lba_map_segment_size;
+	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+	sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+	lba = cmd->t_task_lba;
+	while (lba < cmd->t_task_lba + sectors) {
+		struct t10_alua_lba_map *cur_map = NULL, *map;
+		struct t10_alua_lba_map_member *map_mem;
+
+		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+				    lba_map_list) {
+			u64 start_lba, last_lba;
+			u64 first_lba = map->lba_map_first_lba;
+
+			if (segment_mult) {
+				u64 tmp = lba;
+				start_lba = do_div(tmp, segment_size * segment_mult);
+
+				last_lba = first_lba + segment_size - 1;
+				if (start_lba >= first_lba &&
+				    start_lba <= last_lba) {
+					lba += segment_size;
+					cur_map = map;
+					break;
+				}
+			} else {
+				last_lba = map->lba_map_last_lba;
+				if (lba >= first_lba && lba <= last_lba) {
+					lba = last_lba + 1;
+					cur_map = map;
+					break;
+				}
+			}
+		}
+		if (!cur_map) {
+			spin_unlock(&dev->t10_alua.lba_map_lock);
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
+			return 1;
+		}
+		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			if (map_mem->lba_map_mem_alua_pg_id !=
+			    tg_pt_gp->tg_pt_gp_id)
+				continue;
+			switch (map_mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_STANDBY:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
+				return 1;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
+				return 1;
+			default:
+				break;
+			}
+		}
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return 0;
+}
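+
+/*
+ * Segment-map example: with segment_size = 100 and segment_mult = 2
+ * the map repeats every 200 LBAs; do_div() reduces the LBA to its
+ * offset within that 200-LBA period, which is then matched against
+ * each entry's [first_lba, first_lba + segment_size - 1] window.
+ */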
+
+static inline int core_alua_state_standby(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
+	 * spc4r17 section 5.9.2.4.4
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case LOG_SELECT:
+	case LOG_SENSE:
+	case MODE_SELECT:
+	case MODE_SENSE:
+	case REPORT_LUNS:
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+	case READ_CAPACITY:
+		return 0;
+	case SERVICE_ACTION_IN_16:
+		switch (cdb[1] & 0x1f) {
+		case SAI_READ_CAPACITY_16:
+			return 0;
+		default:
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
+			return 1;
+		}
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case PERSISTENT_RESERVE_IN:
+	case PERSISTENT_RESERVE_OUT:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_unavailable(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
+	 * spc4r17 section 5.9.2.4.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+		return 0;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_transition(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
+	 * spc4r17 section 5.9.2.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+		return 0;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Returns TCM_NO_SENSE (0) when the command may proceed,
+ * TCM_CHECK_CONDITION_NOT_READY when the LUN is not accessible in the
+ * current ALUA state, and TCM_INVALID_CDB_FIELD for an unknown access
+ * state.
+ */
+sense_reason_t
+target_alua_state_check(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *cdb = cmd->t_task_cdb;
+	struct se_lun *lun = cmd->se_lun;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	int out_alua_state, nonop_delay_msecs;
+
+	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+		return 0;
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return 0;
+
+	/*
+	 * First, check for a struct se_port specific secondary ALUA target port
+	 * access state: OFFLINE
+	 */
+	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
+		pr_debug("ALUA: Got secondary offline status for local"
+				" target port\n");
+		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
+		return TCM_CHECK_CONDITION_NOT_READY;
+	}
+
+	if (!lun->lun_tg_pt_gp)
+		return 0;
+
+	spin_lock(&lun->lun_tg_pt_gp_lock);
+	tg_pt_gp = lun->lun_tg_pt_gp;
+	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+
+	// XXX: keeps using tg_pt_gp without reference after unlock
+	spin_unlock(&lun->lun_tg_pt_gp_lock);
+	/*
+	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
+	 * statement so the compiler knows explicitly to check this case first.
+	 * For the Optimized ALUA access state case, we want to process the
+	 * incoming fabric cmd ASAP.
+	 */
+	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
+		return 0;
+
+	switch (out_alua_state) {
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
+		break;
+	case ALUA_ACCESS_STATE_STANDBY:
+		if (core_alua_state_standby(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
+		break;
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		if (core_alua_state_unavailable(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
+		break;
+	case ALUA_ACCESS_STATE_TRANSITION:
+		if (core_alua_state_transition(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
+		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
+			return TCM_CHECK_CONDITION_NOT_READY;
+		break;
+	/*
+	 * OFFLINE is a secondary ALUA target port group access state, that is
+	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
+	 */
+	case ALUA_ACCESS_STATE_OFFLINE:
+	default:
+		pr_err("Unknown ALUA access state: 0x%02x\n",
+				out_alua_state);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	return 0;
+}
+
+/*
+ * Check implicit and explicit ALUA state change request.
+ */
+static sense_reason_t
+core_alua_check_transition(int state, int valid, int *primary)
+{
+	/*
+	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+	 * defined as primary target port asymmetric access states.
+	 */
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+		if (!(valid & ALUA_AO_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		if (!(valid & ALUA_AN_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_STANDBY:
+		if (!(valid & ALUA_S_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		if (!(valid & ALUA_U_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		if (!(valid & ALUA_LBD_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_OFFLINE:
+		/*
+		 * OFFLINE state is defined as a secondary target port
+		 * asymmetric access state.
+		 */
+		if (!(valid & ALUA_O_SUP))
+			goto not_supported;
+		*primary = 0;
+		break;
+	case ALUA_ACCESS_STATE_TRANSITION:
+		/*
+		 * Transitioning is set internally, and
+		 * cannot be selected manually.
+		 */
+		goto not_supported;
+	default:
+		pr_err("Unknown ALUA access state: 0x%02x\n", state);
+		return TCM_INVALID_PARAMETER_LIST;
+	}
+
+	return 0;
+
+not_supported:
+	pr_err("ALUA access state %s not supported",
+	       core_alua_dump_state(state));
+	return TCM_INVALID_PARAMETER_LIST;
+}
+
+static char *core_alua_dump_state(int state)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+		return "Active/Optimized";
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return "Active/NonOptimized";
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		return "LBA Dependent";
+	case ALUA_ACCESS_STATE_STANDBY:
+		return "Standby";
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return "Unavailable";
+	case ALUA_ACCESS_STATE_OFFLINE:
+		return "Offline";
+	case ALUA_ACCESS_STATE_TRANSITION:
+		return "Transitioning";
+	default:
+		return "Unknown";
+	}
+}
+
+char *core_alua_dump_status(int status)
+{
+	switch (status) {
+	case ALUA_STATUS_NONE:
+		return "None";
+	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
+		return "Altered by Explicit STPG";
+	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
+		return "Altered by Implicit ALUA";
+	default:
+		return "Unknown";
+	}
+}
+
+/*
+ * Used by fabric modules to determine when we need to delay processing
+ * for the Active/NonOptimized paths.
+ */
+int core_alua_check_nonop_delay(
+	struct se_cmd *cmd)
+{
+	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
+		return 0;
+	if (in_interrupt())
+		return 0;
+	/*
+	 * The ALUA Active/NonOptimized access state delay can be disabled
+	 * via configfs by setting the delay value to zero
+	 */
+	if (!cmd->alua_nonop_delay)
+		return 0;
+	/*
+	 * struct se_cmd->alua_nonop_delay gets set by a target port group
+	 * defined interval in core_alua_state_nonoptimized()
+	 */
+	msleep_interruptible(cmd->alua_nonop_delay);
+	return 0;
+}
+EXPORT_SYMBOL(core_alua_check_nonop_delay);
+
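+/*
+ * Write an ALUA metadata buffer out to a file under /var/target.  The file
+ * is created (or truncated) with mode 0600; an open failure is reported as
+ * -ENODEV and a short or failed write as -EIO.
+ */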
+static int core_alua_write_tpg_metadata(
+	const char *path,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
+	int ret;
+
+	if (IS_ERR(file)) {
+		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
+		return -ENODEV;
+	}
+	ret = kernel_write(file, md_buf, md_buf_len, 0);
+	if (ret < 0)
+		pr_err("Error writing ALUA metadata file: %s\n", path);
+	fput(file);
+	return (ret < 0) ? -EIO : 0;
+}
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ */
+static int core_alua_update_tpg_primary_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	unsigned char *md_buf;
+	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
+	char path[ALUA_METADATA_PATH_LEN];
+	int len, rc;
+
+	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+
+	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
+			"tg_pt_gp_id=%hu\n"
+			"alua_access_state=0x%02x\n"
+			"alua_access_status=0x%02x\n",
+			tg_pt_gp->tg_pt_gp_id,
+			tg_pt_gp->tg_pt_gp_alua_pending_state,
+			tg_pt_gp->tg_pt_gp_alua_access_status);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN,
+		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+
+	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	kfree(md_buf);
+	return rc;
+}
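+
+/*
+ * For example, the primary metadata file written above, e.g.
+ * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp_name>, contains lines
+ * such as:
+ *
+ *	tg_pt_gp_id=1
+ *	alua_access_state=0x01
+ *	alua_access_status=0x02
+ */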
+
+static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_dev_entry *se_deve;
+	struct se_lun *lun;
+	struct se_lun_acl *lacl;
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+				lun_tg_pt_gp_link) {
+		/*
+		 * After an implicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition for the initiator port associated with every I_T
+		 * nexus with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED.
+		 *
+		 * After an explicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED for the initiator port associated with
+		 * every I_T nexus other than the I_T nexus on which the SET
+		 * TARGET PORT GROUPS command was received.
+		 */
+		if (!percpu_ref_tryget_live(&lun->lun_ref))
+			continue;
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+		spin_lock(&lun->lun_deve_lock);
+		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
+			lacl = rcu_dereference_check(se_deve->se_lun_acl,
+					lockdep_is_held(&lun->lun_deve_lock));
+
+			/*
+			 * spc4r37 p.242:
+			 * After an explicit target port asymmetric access
+			 * state change, a device server shall establish a
+			 * unit attention condition with the additional sense
+			 * code set to ASYMMETRIC ACCESS STATE CHANGED for
+			 * the initiator port associated with every I_T nexus
+			 * other than the I_T nexus on which the SET TARGET
+			 * PORT GROUPS command was received.
+			 */
+			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+			   (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
+			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
+				continue;
+
+			/*
+			 * se_deve->se_lun_acl pointer may be NULL for an
+			 * entry created without explicit Node+MappedLUN ACLs
+			 */
+			if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
+				continue;
+
+			core_scsi3_ua_allocate(se_deve, 0x2A,
+				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
+		}
+		spin_unlock(&lun->lun_deve_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		percpu_ref_put(&lun->lun_ref);
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
+
+	/*
+	 * Update the ALUA metadata buf that has been allocated in
+	 * core_alua_do_port_transition(), this metadata will be written
+	 * to struct file.
+	 *
+	 * Note that there is the case where we do not want to update the
+	 * metadata when the saved metadata is being parsed in userspace
+	 * when setting the existing port access state and access status.
+	 *
+	 * Also note that the failure to write out the ALUA metadata to
+	 * struct file does NOT affect the actual ALUA transition.
+	 */
+	if (tg_pt_gp->tg_pt_gp_write_metadata) {
+		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
+		core_alua_update_tpg_primary_metadata(tg_pt_gp);
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
+	}
+	/*
+	 * Set the current primary ALUA access state to the requested new state
+	 */
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		   tg_pt_gp->tg_pt_gp_alua_pending_state);
+
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" from primary access state %s to %s\n", (explicit) ? "explicit" :
+		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id,
+		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
+		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+
+	core_alua_queue_state_change_ua(tg_pt_gp);
+
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	if (tg_pt_gp->tg_pt_gp_transition_complete)
+		complete(tg_pt_gp->tg_pt_gp_transition_complete);
+}
+
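+/*
+ * Move *tg_pt_gp into new_state.  The group is first placed in the
+ * TRANSITION state and the actual change runs from delayed work: queued
+ * immediately and waited on, unless this is an implicit transition with
+ * tg_pt_gp_implicit_trans_secs configured, in which case it is deferred
+ * by that interval so initiators can observe the transitioning state.
+ */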
+static int core_alua_do_transition_tg_pt(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	int new_state,
+	int explicit)
+{
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	/* Nothing to be done here */
+	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+		return 0;
+
+	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+		return -EAGAIN;
+
+	/*
+	 * Flush any pending transitions
+	 */
+	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
+	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
+	    ALUA_ACCESS_STATE_TRANSITION) {
+		/* Just in case: make the in-flight work land on new_state */
+		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+		wait_for_completion(&wait);
+		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+		return 0;
+	}
+
+	/*
+	 * Save the old primary ALUA access state, and set the current state
+	 * to ALUA_ACCESS_STATE_TRANSITION.
+	 */
+	tg_pt_gp->tg_pt_gp_alua_previous_state =
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+			ALUA_ACCESS_STATE_TRANSITION);
+	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+	core_alua_queue_state_change_ua(tg_pt_gp);
+
+	/*
+	 * Check for the optional ALUA primary state transition delay
+	 */
+	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+	/*
+	 * Take a reference for workqueue item
+	 */
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
+		unsigned long transition_tmo;
+
+		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
+		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+				   &tg_pt_gp->tg_pt_gp_transition_work,
+				   transition_tmo);
+	} else {
+		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
+		wait_for_completion(&wait);
+		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+	}
+
+	return 0;
+}
+
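+/*
+ * Request a primary ALUA state transition.  For devices in the
+ * default_lu_gp only the passed *l_tg_pt_gp transitions; for any other
+ * LU group, every target port group with a matching ID on every device
+ * in that LU group is transitioned as well.
+ */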
+int core_alua_do_port_transition(
+	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
+	struct se_device *l_dev,
+	struct se_lun *l_lun,
+	struct se_node_acl *l_nacl,
+	int new_state,
+	int explicit)
+{
+	struct se_device *dev;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	int primary, valid_states, rc = 0;
+
+	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+		return -EINVAL;
+
+	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
+	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = local_lu_gp_mem->lu_gp;
+	atomic_inc(&lu_gp->lu_gp_ref_cnt);
+	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
+	/*
+	 * For storage objects that are members of the 'default_lu_gp',
+	 * we only do transition on the passed *l_tg_pt_gp, and not
+	 * on all of the matching target port groups IDs in default_lu_gp.
+	 */
+	if (!lu_gp->lu_gp_id) {
+		/*
+		 * core_alua_do_transition_tg_pt() will always return
+		 * success.
+		 */
+		l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
+		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
+		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
+						   new_state, explicit);
+		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
+		return rc;
+	}
+	/*
+	 * For all other LU groups aside from 'default_lu_gp', walk all of
+	 * the associated storage objects looking for a matching target port
+	 * group ID from the local target port group.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
+				lu_gp_mem_list) {
+
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		spin_unlock(&lu_gp->lu_gp_lock);
+
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+		list_for_each_entry(tg_pt_gp,
+				&dev->t10_alua.tg_pt_gps_list,
+				tg_pt_gp_list) {
+
+			if (!tg_pt_gp->tg_pt_gp_valid_id)
+				continue;
+			/*
+			 * If the asymmetric access state is changed for any
+			 * target port group accessible via a logical unit
+			 * within a LU group, the asymmetric access state for
+			 * the same target port group accessible via other
+			 * logical units in that LU group will also change.
+			 */
+			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
+				continue;
+
+			if (l_tg_pt_gp == tg_pt_gp) {
+				tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
+				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
+			} else {
+				tg_pt_gp->tg_pt_gp_alua_lun = NULL;
+				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
+			}
+			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+			/*
+			 * core_alua_do_transition_tg_pt() will always return
+			 * success.
+			 */
+			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
+					new_state, explicit);
+
+			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			if (rc)
+				break;
+		}
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	if (!rc) {
+		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
+			 " Group IDs: %hu %s transition to primary state: %s\n",
+			 config_item_name(&lu_gp->lu_gp_group.cg_item),
+			 l_tg_pt_gp->tg_pt_gp_id,
+			 (explicit) ? "explicit" : "implicit",
+			 core_alua_dump_state(new_state));
+	}
+
+	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
+	return rc;
+}
+
+static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
+{
+	struct se_portal_group *se_tpg = lun->lun_tpg;
+	unsigned char *md_buf;
+	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+	int len, rc;
+
+	mutex_lock(&lun->lun_tg_pt_md_mutex);
+
+	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
+		rc = -ENOMEM;
+		goto out_unlock;
+	}
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+
+	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
+			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
+
+	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
+		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
+				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+
+	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
+			"alua_tg_pt_status=0x%02x\n",
+			atomic_read(&lun->lun_tg_pt_secondary_offline),
+			lun->lun_tg_pt_secondary_stat);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu",
+			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
+			lun->unpacked_lun);
+
+	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	kfree(md_buf);
+
+out_unlock:
+	mutex_unlock(&lun->lun_tg_pt_md_mutex);
+	return rc;
+}
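+
+/*
+ * For example, the secondary metadata file written above, e.g.
+ * /var/target/alua/<fabric>/<wwn>+<tpgt>/lun_0, contains lines such as:
+ *
+ *	alua_tg_pt_offline=1
+ *	alua_tg_pt_status=0x01
+ */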
+
+static int core_alua_set_tg_pt_secondary_state(
+	struct se_lun *lun,
+	int explicit,
+	int offline)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	int trans_delay_msecs;
+
+	spin_lock(&lun->lun_tg_pt_gp_lock);
+	tg_pt_gp = lun->lun_tg_pt_gp;
+	if (!tg_pt_gp) {
+		spin_unlock(&lun->lun_tg_pt_gp_lock);
+		pr_err("Unable to complete secondary state"
+				" transition\n");
+		return -EINVAL;
+	}
+	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
+	/*
+	 * Set the secondary ALUA target port access state to OFFLINE
+	 * or release the previously secondary state for struct se_lun
+	 */
+	if (offline)
+		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
+	else
+		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+
+	lun->lun_tg_pt_secondary_stat = (explicit) ?
+			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" to secondary access state: %s\n", (explicit) ? "explicit" :
+		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
+
+	spin_unlock(&lun->lun_tg_pt_gp_lock);
+	/*
+	 * Do the optional transition delay after we set the secondary
+	 * ALUA access state.
+	 */
+	if (trans_delay_msecs != 0)
+		msleep_interruptible(trans_delay_msecs);
+	/*
+	 * See if we need to update the ALUA fabric port metadata for
+	 * secondary state and status
+	 */
+	if (lun->lun_tg_pt_secondary_write_md)
+		core_alua_update_tpg_secondary_metadata(lun);
+
+	return 0;
+}
+
+struct t10_alua_lba_map *
+core_alua_allocate_lba_map(struct list_head *list,
+			   u64 first_lba, u64 last_lba)
+{
+	struct t10_alua_lba_map *lba_map;
+
+	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
+	if (!lba_map) {
+		pr_err("Unable to allocate struct t10_alua_lba_map\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
+	lba_map->lba_map_first_lba = first_lba;
+	lba_map->lba_map_last_lba = last_lba;
+
+	list_add_tail(&lba_map->lba_map_list, list);
+	return lba_map;
+}
+
+int
+core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
+			       int pg_id, int state)
+{
+	struct t10_alua_lba_map_member *lba_map_mem;
+
+	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
+			    lba_map_mem_list) {
+		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
+			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
+			return -EINVAL;
+		}
+	}
+
+	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
+	if (!lba_map_mem) {
+		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
+		return -ENOMEM;
+	}
+	lba_map_mem->lba_map_mem_alua_state = state;
+	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
+
+	list_add_tail(&lba_map_mem->lba_map_mem_list,
+		      &lba_map->lba_map_mem_list);
+	return 0;
+}
+
+void
+core_alua_free_lba_map(struct list_head *lba_list)
+{
+	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
+	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
+
+	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
+				 lba_map_list) {
+		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
+					 &lba_map->lba_map_mem_list,
+					 lba_map_mem_list) {
+			list_del(&lba_map_mem->lba_map_mem_list);
+			kmem_cache_free(t10_alua_lba_map_mem_cache,
+					lba_map_mem);
+		}
+		list_del(&lba_map->lba_map_list);
+		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
+	}
+}
+
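+/*
+ * Install a new LBA map list for referrals: splice out the old list under
+ * the map lock, toggle ALUA_LBD_SUP in every target port group with a
+ * valid ID to reflect whether a map is now active, and free the old list
+ * once the locks are dropped.
+ */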
+void
+core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
+		      int segment_size, int segment_mult)
+{
+	struct list_head old_lba_map_list;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	int activate = 0, supported;
+
+	INIT_LIST_HEAD(&old_lba_map_list);
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	dev->t10_alua.lba_map_segment_size = segment_size;
+	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
+	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
+	if (lba_map_list) {
+		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
+		activate = 1;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+			    tg_pt_gp_list) {
+
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
+			continue;
+		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
+		if (activate)
+			supported |= ALUA_LBD_SUP;
+		else
+			supported &= ~ALUA_LBD_SUP;
+		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
+	}
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+	core_alua_free_lba_map(&old_lba_map_list);
+}
+
+struct t10_alua_lu_gp *
+core_alua_allocate_lu_gp(const char *name, int def_group)
+{
+	struct t10_alua_lu_gp *lu_gp;
+
+	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
+	if (!lu_gp) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
+	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
+	spin_lock_init(&lu_gp->lu_gp_lock);
+	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
+
+	if (def_group) {
+		lu_gp->lu_gp_id = alua_lu_gps_counter++;
+		lu_gp->lu_gp_valid_id = 1;
+		alua_lu_gps_count++;
+	}
+
+	return lu_gp;
+}
+
+int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
+{
+	struct t10_alua_lu_gp *lu_gp_tmp;
+	u16 lu_gp_id_tmp;
+	/*
+	 * The lu_gp->lu_gp_id may only be set once.
+	 */
+	if (lu_gp->lu_gp_valid_id) {
+		pr_warn("ALUA LU Group already has a valid ID,"
+			" ignoring request\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gps_lock);
+	if (alua_lu_gps_count == 0x0000ffff) {
+		pr_err("Maximum ALUA alua_lu_gps_count:"
+				" 0x0000ffff reached\n");
+		spin_unlock(&lu_gps_lock);
+		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+		return -ENOSPC;
+	}
+again:
+	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
+				alua_lu_gps_counter++;
+
+	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
+		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
+			if (!lu_gp_id)
+				goto again;
+
+			pr_warn("ALUA Logical Unit Group ID: %hu"
+				" already exists, ignoring request\n",
+				lu_gp_id);
+			spin_unlock(&lu_gps_lock);
+			return -EINVAL;
+		}
+	}
+
+	lu_gp->lu_gp_id = lu_gp_id_tmp;
+	lu_gp->lu_gp_valid_id = 1;
+	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
+	alua_lu_gps_count++;
+	spin_unlock(&lu_gps_lock);
+
+	return 0;
+}
+
+static struct t10_alua_lu_gp_member *
+core_alua_allocate_lu_gp_mem(struct se_device *dev)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
+	if (!lu_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
+	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
+	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
+
+	lu_gp_mem->lu_gp_mem_dev = dev;
+	dev->dev_alua_lu_gp_mem = lu_gp_mem;
+
+	return lu_gp_mem;
+}
+
+void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has
+	 * already been called from target_core_alua_drop_lu_gp().
+	 *
+	 * Here, we remove the *lu_gp from the global list so that
+	 * no associations can be made while we are releasing
+	 * struct t10_alua_lu_gp.
+	 */
+	spin_lock(&lu_gps_lock);
+	list_del(&lu_gp->lu_gp_node);
+	alua_lu_gps_count--;
+	spin_unlock(&lu_gps_lock);
+	/*
+	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
+	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
+	 * released with core_alua_put_lu_gp_from_name()
+	 */
+	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_lu_gp * from all associated
+	 * struct se_device.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
+				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		/*
+		 * lu_gp_mem is associated with a single
+		 * struct se_device->dev_alua_lu_gp_mem, and is released when
+		 * struct se_device is released via core_alua_free_lu_gp_mem().
+		 *
+		 * If the passed lu_gp does NOT match the default_lu_gp, assume
+		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
+		 */
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		if (lu_gp != default_lu_gp)
+			__core_alua_attach_lu_gp_mem(lu_gp_mem,
+					default_lu_gp);
+		else
+			lu_gp_mem->lu_gp = NULL;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+}
+
+void core_alua_free_lu_gp_mem(struct se_device *dev)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!lu_gp_mem)
+		return;
+
+	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if (lu_gp) {
+		spin_lock(&lu_gp->lu_gp_lock);
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		lu_gp_mem->lu_gp = NULL;
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
+}
+
+struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_item *ci;
+
+	spin_lock(&lu_gps_lock);
+	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
+		if (!lu_gp->lu_gp_valid_id)
+			continue;
+		ci = &lu_gp->lu_gp_group.cg_item;
+		if (!strcmp(config_item_name(ci), name)) {
+			atomic_inc(&lu_gp->lu_gp_ref_cnt);
+			spin_unlock(&lu_gps_lock);
+			return lu_gp;
+		}
+	}
+	spin_unlock(&lu_gps_lock);
+
+	return NULL;
+}
+
+void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gps_lock);
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	spin_unlock(&lu_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_attach_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	lu_gp_mem->lu_gp = lu_gp;
+	lu_gp_mem->lu_gp_assoc = 1;
+	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
+	lu_gp->lu_gp_members++;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_drop_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_del(&lu_gp_mem->lu_gp_mem_list);
+	lu_gp_mem->lu_gp = NULL;
+	lu_gp_mem->lu_gp_assoc = 0;
+	lu_gp->lu_gp_members--;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
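+/*
+ * Allocate a target port group in the Active/Optimized state with both
+ * implicit and explicit ALUA enabled, the default delay values, and all
+ * primary states except LBA Dependent reported as supported.  With
+ * def_group set, the group is assigned the next ID and added to the
+ * device's tg_pt_gps_list immediately.
+ */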
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
+		const char *name, int def_group)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
+	if (!tg_pt_gp) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
+	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
+	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+			  core_alua_do_transition_tg_pt_work);
+	tg_pt_gp->tg_pt_gp_dev = dev;
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
+	/*
+	 * Enable both explicit and implicit ALUA support by default
+	 */
+	tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
+	/*
+	 * Set the default Active/NonOptimized Delay in milliseconds
+	 */
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
+
+	/*
+	 * Enable all supported states
+	 */
+	tg_pt_gp->tg_pt_gp_alua_supported_states =
+	    ALUA_T_SUP | ALUA_O_SUP |
+	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
+
+	if (def_group) {
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+		tg_pt_gp->tg_pt_gp_id =
+				dev->t10_alua.alua_tg_pt_gps_counter++;
+		tg_pt_gp->tg_pt_gp_valid_id = 1;
+		dev->t10_alua.alua_tg_pt_gps_count++;
+		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			      &dev->t10_alua.tg_pt_gps_list);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+	}
+
+	return tg_pt_gp;
+}
+
+int core_alua_set_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u16 tg_pt_gp_id)
+{
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
+	u16 tg_pt_gp_id_tmp;
+
+	/*
+	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
+	 */
+	if (tg_pt_gp->tg_pt_gp_valid_id) {
+		pr_warn("ALUA TG PT Group already has a valid ID,"
+			" ignoring request\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
+			" 0x0000ffff reached\n");
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+		return -ENOSPC;
+	}
+again:
+	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
+			dev->t10_alua.alua_tg_pt_gps_counter++;
+
+	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
+			if (!tg_pt_gp_id)
+				goto again;
+
+			pr_err("ALUA Target Port Group ID: %hu already"
+				" exists, ignoring request\n", tg_pt_gp_id);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+			return -EINVAL;
+		}
+	}
+
+	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
+	tg_pt_gp->tg_pt_gp_valid_id = 1;
+	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			&dev->t10_alua.tg_pt_gps_list);
+	dev->t10_alua.alua_tg_pt_gps_count++;
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	return 0;
+}
+
+void core_alua_free_tg_pt_gp(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+	struct se_lun *lun, *next;
+
+	/*
+	 * Once we have reached this point, config_item_put() has already
+	 * been called from target_core_alua_drop_tg_pt_gp().
+	 *
+	 * Here we remove *tg_pt_gp from the global list so that
+	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
+	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
+	 */
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_del(&tg_pt_gp->tg_pt_gp_list);
+	dev->t10_alua.alua_tg_pt_gps_count--;
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+
+	/*
+	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
+	 * core_alua_get_tg_pt_gp_by_name() in
+	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
+	 * to be released with core_alua_put_tg_pt_gp_from_name().
+	 */
+	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
+		cpu_relax();
+
+	/*
+	 * Release reference to struct t10_alua_tg_pt_gp from all associated
+	 * struct se_lun.
+	 */
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry_safe(lun, next,
+			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
+		list_del_init(&lun->lun_tg_pt_gp_link);
+		tg_pt_gp->tg_pt_gp_members--;
+
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		/*
+		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
+		 * assume we want to re-associate a given tg_pt_gp_mem with
+		 * default_tg_pt_gp.
+		 */
+		spin_lock(&lun->lun_tg_pt_gp_lock);
+		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
+			__target_attach_tg_pt_gp(lun,
+					dev->t10_alua.default_tg_pt_gp);
+		} else
+			lun->lun_tg_pt_gp = NULL;
+		spin_unlock(&lun->lun_tg_pt_gp_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+}
+
+static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
+		struct se_device *dev, const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct config_item *ci;
+
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
+			continue;
+		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		if (!strcmp(config_item_name(ci), name)) {
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+			return tg_pt_gp;
+		}
+	}
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	return NULL;
+}
+
+static void core_alua_put_tg_pt_gp_from_name(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+}
+
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+		struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_dev_entry *se_deve;
+
+	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	lun->lun_tg_pt_gp = tg_pt_gp;
+	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
+	tg_pt_gp->tg_pt_gp_members++;
+	spin_lock(&lun->lun_deve_lock);
+	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
+		core_scsi3_ua_allocate(se_deve, 0x3f,
+				       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
+	spin_unlock(&lun->lun_deve_lock);
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+void target_attach_tg_pt_gp(struct se_lun *lun,
+		struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&lun->lun_tg_pt_gp_lock);
+	__target_attach_tg_pt_gp(lun, tg_pt_gp);
+	spin_unlock(&lun->lun_tg_pt_gp_lock);
+}
+
+static void __target_detach_tg_pt_gp(struct se_lun *lun,
+		struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_del_init(&lun->lun_tg_pt_gp_link);
+	tg_pt_gp->tg_pt_gp_members--;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	lun->lun_tg_pt_gp = NULL;
+}
+
+void target_detach_tg_pt_gp(struct se_lun *lun)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+	spin_lock(&lun->lun_tg_pt_gp_lock);
+	tg_pt_gp = lun->lun_tg_pt_gp;
+	if (tg_pt_gp)
+		__target_detach_tg_pt_gp(lun, tg_pt_gp);
+	spin_unlock(&lun->lun_tg_pt_gp_lock);
+}
+
+ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
+{
+	struct config_item *tg_pt_ci;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	ssize_t len = 0;
+
+	spin_lock(&lun->lun_tg_pt_gp_lock);
+	tg_pt_gp = lun->lun_tg_pt_gp;
+	if (tg_pt_gp) {
+		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
+			" %hu\nTG Port Primary Access State: %s\nTG Port "
+			"Primary Access Status: %s\nTG Port Secondary Access"
+			" State: %s\nTG Port Secondary Access Status: %s\n",
+			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
+			core_alua_dump_state(atomic_read(
+					&tg_pt_gp->tg_pt_gp_alua_access_state)),
+			core_alua_dump_status(
+				tg_pt_gp->tg_pt_gp_alua_access_status),
+			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
+			"Offline" : "None",
+			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
+	}
+	spin_unlock(&lun->lun_tg_pt_gp_lock);
+
+	return len;
+}
+
+ssize_t core_alua_store_tg_pt_gp_info(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	struct se_portal_group *tpg = lun->lun_tpg;
+	/*
+	 * rcu_dereference_raw protected by se_lun->lun_group symlink
+	 * reference to se_device->dev_group.
+	 */
+	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+	int move = 0;
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+		return -ENODEV;
+
+	if (count >= TG_PT_GROUP_NAME_BUF) {
+		pr_err("ALUA Target Port Group alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA target port group alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
+		 * struct t10_alua_tg_pt_gp.  This reference is released with
+		 * core_alua_put_tg_pt_gp_from_name() below.
+		 */
+		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
+					strstrip(buf));
+		if (!tg_pt_gp_new)
+			return -ENODEV;
+	}
+
+	spin_lock(&lun->lun_tg_pt_gp_lock);
+	tg_pt_gp = lun->lun_tg_pt_gp;
+	if (tg_pt_gp) {
+		/*
+		 * Clearing an existing tg_pt_gp association, and replacing
+		 * with the default_tg_pt_gp.
+		 */
+		if (!tg_pt_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Moving"
+				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
+				" alua/%s, ID: %hu back to"
+				" default_tg_pt_gp\n",
+				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+				tpg->se_tpg_tfo->tpg_get_tag(tpg),
+				config_item_name(&lun->lun_group.cg_item),
+				config_item_name(
+					&tg_pt_gp->tg_pt_gp_group.cg_item),
+				tg_pt_gp->tg_pt_gp_id);
+
+			__target_detach_tg_pt_gp(lun, tg_pt_gp);
+			__target_attach_tg_pt_gp(lun,
+					dev->t10_alua.default_tg_pt_gp);
+			spin_unlock(&lun->lun_tg_pt_gp_lock);
+
+			return count;
+		}
+		__target_detach_tg_pt_gp(lun, tg_pt_gp);
+		move = 1;
+	}
+
+	__target_attach_tg_pt_gp(lun, tg_pt_gp_new);
+	spin_unlock(&lun->lun_tg_pt_gp_lock);
+	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
+		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg),
+		config_item_name(&lun->lun_group.cg_item),
+		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
+		tg_pt_gp_new->tg_pt_gp_id);
+
+	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+	return count;
+}
+
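+/*
+ * For example (paths illustrative), a LUN is moved into a target port
+ * group by writing the group's configfs name, and moved back to
+ * default_tg_pt_gp by writing "NULL":
+ *
+ *	echo some_tg_pt_gp > $FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
+ *	echo NULL > $FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
+ */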
+ssize_t core_alua_show_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
+	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
+		return sprintf(page, "Implicit and Explicit\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
+		return sprintf(page, "Implicit\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
+		return sprintf(page, "Explicit\n");
+	else
+		return sprintf(page, "None\n");
+}
+
+ssize_t core_alua_store_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_access_type\n");
+		return ret;
+	}
+	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
+		pr_err("Illegal value for alua_access_type:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	if (tmp == 3)
+		tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
+	else if (tmp == 2)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
+	else if (tmp == 1)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
+	else
+		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
+
+	return count;
+}
+
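+/*
+ * The stored value selects the TPGS mode reported via INQUIRY:
+ * 0 = None, 1 = Implicit, 2 = Explicit, 3 = Implicit and Explicit.
+ * For example (path illustrative):
+ *
+ *	echo 3 > $HBA/$DEV/alua/$TG_PT_GP/alua_access_type
+ */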
+ssize_t core_alua_show_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
+}
+
+ssize_t core_alua_store_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract nonop_delay_msecs\n");
+		return ret;
+	}
+	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
+		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_NONOP_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+}
+
+ssize_t core_alua_store_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract trans_delay_msecs\n");
+		return ret;
+	}
+	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
+		pr_err("Passed trans_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_TRANS_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_implicit_trans_secs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
+}
+
+ssize_t core_alua_store_implicit_trans_secs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract implicit_trans_secs\n");
+		return ret;
+	}
+	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
+		pr_err("Passed implicit_trans_secs: %lu, exceeds"
+			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
+			ALUA_MAX_IMPLICIT_TRANS_SECS);
+		return  -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
+}
+
+ssize_t core_alua_store_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract preferred ALUA value\n");
+		return ret;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
+{
+	return sprintf(page, "%d\n",
+		atomic_read(&lun->lun_tg_pt_secondary_offline));
+}
+
+ssize_t core_alua_store_offline_bit(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	/*
+	 * rcu_dereference_raw protected by se_lun->lun_group symlink
+	 * reference to se_device->dev_group.
+	 */
+	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+	unsigned long tmp;
+	int ret;
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+		return -ENODEV;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_tg_pt_offline value\n");
+		return ret;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
+	if (ret < 0)
+		return -EINVAL;
+
+	return count;
+}
+
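+/*
+ * For example (path illustrative), a single LUN can be taken to the
+ * secondary OFFLINE state and back without changing the primary target
+ * port group state:
+ *
+ *	echo 1 > $FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_offline
+ *	echo 0 > $FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_offline
+ */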
+ssize_t core_alua_show_secondary_status(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
+}
+
+ssize_t core_alua_store_secondary_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_tg_pt_status\n");
+		return ret;
+	}
+	if ((tmp != ALUA_STATUS_NONE) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
+		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	lun->lun_tg_pt_secondary_stat = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_secondary_write_metadata(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
+}
+
+ssize_t core_alua_store_secondary_write_metadata(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_tg_pt_write_md\n");
+		return ret;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for alua_tg_pt_write_md:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	lun->lun_tg_pt_secondary_write_md = (int)tmp;
+
+	return count;
+}
+
+int core_setup_alua(struct se_device *dev)
+{
+	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
+		struct t10_alua_lu_gp_member *lu_gp_mem;
+
+		/*
+		 * Associate this struct se_device with the default ALUA
+		 * LUN Group.
+		 */
+		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
+		if (IS_ERR(lu_gp_mem))
+			return PTR_ERR(lu_gp_mem);
+
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		__core_alua_attach_lu_gp_mem(lu_gp_mem,
+				default_lu_gp);
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		pr_debug("%s: Adding to default ALUA LU Group:"
+			" core/alua/lu_gps/default_lu_gp\n",
+			dev->transport->name);
+	}
+
+	return 0;
+}
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
new file mode 100644
index 0000000..9b250f9
--- /dev/null
+++ b/drivers/target/target_core_alua.h
@@ -0,0 +1,157 @@
+#ifndef TARGET_CORE_ALUA_H
+#define TARGET_CORE_ALUA_H
+
+/*
+ * INQUIRY response data, TPGS Field
+ *
+ * from spc4r17 section 6.4.2 Table 135
+ */
+#define TPGS_NO_ALUA				0x00
+#define TPGS_IMPLICIT_ALUA			0x10
+#define TPGS_EXPLICIT_ALUA			0x20
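+/*
+ * A device supporting both modes reports
+ * (TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA) == 0x30 in the TPGS field.
+ */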
+
+/*
+ * ASYMMETRIC ACCESS STATE field
+ *
+ * from spc4r36j section 6.37 Table 307
+ */
+#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED	0x0
+#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
+#define ALUA_ACCESS_STATE_STANDBY		0x2
+#define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
+#define ALUA_ACCESS_STATE_LBA_DEPENDENT		0x4
+#define ALUA_ACCESS_STATE_OFFLINE		0xe
+#define ALUA_ACCESS_STATE_TRANSITION		0xf
+
+/*
+ * from spc4r36j section 6.37 Table 306
+ */
+#define ALUA_T_SUP		0x80
+#define ALUA_O_SUP		0x40
+#define ALUA_LBD_SUP		0x10
+#define ALUA_U_SUP		0x08
+#define ALUA_S_SUP		0x04
+#define ALUA_AN_SUP		0x02
+#define ALUA_AO_SUP		0x01
+
+/*
+ * REPORT_TARGET_PORT_GROUP STATUS CODE
+ *
+ * from spc4r17 section 6.27 Table 246
+ */
+#define ALUA_STATUS_NONE				0x00
+#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG		0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA		0x02
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_04H_ALUA_STATE_TRANSITION			0x0a
+#define ASCQ_04H_ALUA_TG_PT_STANDBY			0x0b
+#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE			0x0c
+#define ASCQ_04H_ALUA_OFFLINE				0x12
+
+/*
+ * Used as the default for Active/NonOptimized delay (in milliseconds)
+ * This can also be changed via configfs on a per target port group basis.
+ */
+#define ALUA_DEFAULT_NONOP_DELAY_MSECS			100
+#define ALUA_MAX_NONOP_DELAY_MSECS			10000 /* 10 seconds */
+/*
+ * Used for the implicit and explicit ALUA transition delay, which is
+ * disabled by default and is intended for debugging client-side ALUA code.
+ */
+#define ALUA_DEFAULT_TRANS_DELAY_MSECS			0
+#define ALUA_MAX_TRANS_DELAY_MSECS			30000 /* 30 seconds */
+/*
+ * Used for the recommended application client implicit transition timeout
+ * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
+ */
+#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS			0
+#define ALUA_MAX_IMPLICIT_TRANS_SECS			255
+/*
+ * Used by core_alua_update_tpg_primary_metadata() and
+ * core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_METADATA_PATH_LEN				512
+/*
+ * Used by core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_SECONDARY_METADATA_WWN_LEN			256
+
+/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
+#define ALUA_MD_BUF_LEN					1024
+
+extern struct kmem_cache *t10_alua_lu_gp_cache;
+extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
+extern struct kmem_cache *t10_alua_lba_map_cache;
+extern struct kmem_cache *t10_alua_lba_map_mem_cache;
+
+extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
+				struct se_device *, struct se_lun *,
+				struct se_node_acl *, int, int);
+extern char *core_alua_dump_status(int);
+extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
+				struct list_head *, u64, u64);
+extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
+extern void core_alua_free_lba_map(struct list_head *);
+extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
+				int, int);
+extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
+extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
+extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
+extern void core_alua_free_lu_gp_mem(struct se_device *);
+extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
+extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
+extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void core_alua_drop_lu_gp_dev(struct se_device *);
+extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+			struct se_device *, const char *, int);
+extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
+extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
+extern void target_detach_tg_pt_gp(struct se_lun *);
+extern void target_attach_tg_pt_gp(struct se_lun *, struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *, const char *,
+						size_t);
+extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
+extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+						char *);
+extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
+extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
+					size_t);
+extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
+extern ssize_t core_alua_store_secondary_status(struct se_lun *,
+					const char *, size_t);
+extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
+					char *);
+extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
+					const char *, size_t);
+extern int core_setup_alua(struct se_device *);
+extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
+
+#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
new file mode 100644
index 0000000..b9b9ffd
--- /dev/null
+++ b/drivers/target/target_core_configfs.c
@@ -0,0 +1,3300 @@
+/*******************************************************************************
+ * Filename:  target_core_configfs.c
+ *
+ * This file contains ConfigFS logic for the Generic Target Engine project.
+ *
+ * (c) Copyright 2008-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+#include <linux/spinlock.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_rd.h"
+#include "target_core_xcopy.h"
+
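+/*
+ * Each TB_CIT_SETUP()/TB_CIT_SETUP_DRV() invocation generates a
+ * target_core_setup_<name>_cit() helper that initializes the matching
+ * config_item_type embedded in struct target_backend.
+ */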
+#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
+static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
+{									\
+	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
+									\
+	cit->ct_item_ops = _item_ops;					\
+	cit->ct_group_ops = _group_ops;					\
+	cit->ct_attrs = _attrs;						\
+	cit->ct_owner = tb->ops->owner;					\
+	pr_debug("Setup generic %s\n", __stringify(_name));		\
+}
+
+#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
+static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
+{									\
+	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
+									\
+	cit->ct_item_ops = _item_ops;					\
+	cit->ct_group_ops = _group_ops;					\
+	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
+	cit->ct_owner = tb->ops->owner;					\
+	pr_debug("Setup generic %s\n", __stringify(_name));		\
+}
+
+extern struct t10_alua_lu_gp *default_lu_gp;
+
+static LIST_HEAD(g_tf_list);
+static DEFINE_MUTEX(g_tf_lock);
+
+static struct config_group target_core_hbagroup;
+static struct config_group alua_group;
+static struct config_group alua_lu_gps_group;
+
+static inline struct se_hba *
+item_to_hba(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_hba, hba_group);
+}
+
+/*
+ * Attributes for /sys/kernel/config/target/
+ */
+static ssize_t target_core_item_version_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
+		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
+		utsname()->sysname, utsname()->machine);
+}
+
+CONFIGFS_ATTR_RO(target_core_item_, version);
+
+static struct target_fabric_configfs *target_core_get_fabric(
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!name)
+		return NULL;
+
+	mutex_lock(&g_tf_lock);
+	list_for_each_entry(tf, &g_tf_list, tf_list) {
+		if (!strcmp(tf->tf_ops->name, name)) {
+			atomic_inc(&tf->tf_access_cnt);
+			mutex_unlock(&g_tf_lock);
+			return tf;
+		}
+	}
+	mutex_unlock(&g_tf_lock);
+
+	return NULL;
+}
+
+/*
+ * Called from struct target_core_group_ops->make_group()
+ */
+static struct config_group *target_core_register_fabric(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
+			" %s\n", group, name);
+
+	tf = target_core_get_fabric(name);
+	if (!tf) {
+		pr_debug("target_core_register_fabric() trying autoload for %s\n",
+			 name);
+
+		/*
+		 * Below are some hardcoded request_module() calls to
+		 * automatically load fabric modules when the following is
+		 * called:
+		 *
+		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
+		 *
+		 * Note that this does not limit which TCM fabric modules can
+		 * be registered, but simply provides autoload logic for known
+		 * TCM fabric modules when mkdir(2) is called with a matching
+		 * name.
+		 */
+
+		if (!strncmp(name, "iscsi", 5)) {
+			/*
+			 * Automatically load the LIO Target fabric module when the
+			 * following is called:
+			 *
+			 * mkdir -p $CONFIGFS/target/iscsi
+			 */
+			ret = request_module("iscsi_target_mod");
+			if (ret < 0) {
+				pr_debug("request_module() failed for"
+				         " iscsi_target_mod.ko: %d\n", ret);
+				return ERR_PTR(-EINVAL);
+			}
+		} else if (!strncmp(name, "loopback", 8)) {
+			/*
+			 * Automatically load the tcm_loop fabric module when the
+			 * following is called:
+			 *
+			 * mkdir -p $CONFIGFS/target/loopback
+			 */
+			ret = request_module("tcm_loop");
+			if (ret < 0) {
+				pr_debug("request_module() failed for"
+				         " tcm_loop.ko: %d\n", ret);
+				return ERR_PTR(-EINVAL);
+			}
+		}
+
+		tf = target_core_get_fabric(name);
+	}
+
+	if (!tf) {
+		pr_debug("target_core_get_fabric() failed for %s\n",
+		         name);
+		return ERR_PTR(-EINVAL);
+	}
+	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
+			" %s\n", tf->tf_ops->name);
+	/*
+	 * On a successful target_core_get_fabric() lookup, the returned
+	 * struct target_fabric_configfs *tf will contain a usage reference.
+	 */
+	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+			&tf->tf_wwn_cit);
+
+	tf->tf_group.default_groups = tf->tf_default_groups;
+	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
+	tf->tf_group.default_groups[1] = NULL;
+
+	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
+	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
+			&tf->tf_discovery_cit);
+
+	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+			" %s\n", tf->tf_group.cg_item.ci_name);
+	return &tf->tf_group;
+}
+
+/*
+ * Called from struct target_core_group_ops->drop_item()
+ */
+static void target_core_deregister_fabric(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct target_fabric_configfs *tf = container_of(
+		to_config_group(item), struct target_fabric_configfs, tf_group);
+	struct config_group *tf_group;
+	struct config_item *df_item;
+	int i;
+
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+		" tf list\n", config_item_name(item));
+
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+			" %s\n", tf->tf_ops->name);
+	atomic_dec(&tf->tf_access_cnt);
+
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+			" %s\n", config_item_name(item));
+
+	tf_group = &tf->tf_group;
+	for (i = 0; tf_group->default_groups[i]; i++) {
+		df_item = &tf_group->default_groups[i]->cg_item;
+		tf_group->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_fabric_group_ops = {
+	.make_group	= &target_core_register_fabric,
+	.drop_item	= &target_core_deregister_fabric,
+};
+
+/*
+ * All item attributes appearing in /sys/kernel/config/target/ appear here.
+ */
+static struct configfs_attribute *target_core_fabric_item_attrs[] = {
+	&target_core_item_attr_version,
+	NULL,
+};
+
+/*
+ * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
+ */
+static struct config_item_type target_core_fabrics_item = {
+	.ct_group_ops	= &target_core_fabric_group_ops,
+	.ct_attrs	= target_core_fabric_item_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct configfs_subsystem target_core_fabrics = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "target",
+			.ci_type = &target_core_fabrics_item,
+		},
+	},
+};
+
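+/*
+ * target_depend_item() pins a configfs item via configfs_depend_item() so
+ * that userspace cannot rmdir(2) it while the core still holds a reference
+ * (e.g. while a backend device is exported as a LUN);
+ * target_undepend_item() drops the pin again.
+ */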
+int target_depend_item(struct config_item *item)
+{
+	return configfs_depend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_depend_item);
+
+void target_undepend_item(struct config_item *item)
+{
+	return configfs_undepend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_undepend_item);
+
+/*##############################################################################
+// Start functions called by external Target Fabrics Modules
+//############################################################################*/
+
+static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
+{
+	if (!tfo->name) {
+		pr_err("Missing tfo->name\n");
+		return -EINVAL;
+	}
+	if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
+		pr_err("Passed name: %s exceeds TARGET_FABRIC"
+			"_NAME_SIZE\n", tfo->name);
+		return -EINVAL;
+	}
+	if (!tfo->get_fabric_name) {
+		pr_err("Missing tfo->get_fabric_name()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_wwn) {
+		pr_err("Missing tfo->tpg_get_wwn()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_tag) {
+		pr_err("Missing tfo->tpg_get_tag()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_check_demo_mode) {
+		pr_err("Missing tfo->tpg_check_demo_mode()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_check_demo_mode_cache) {
+		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_check_demo_mode_write_protect) {
+		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_check_prod_mode_write_protect) {
+		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_inst_index) {
+		pr_err("Missing tfo->tpg_get_inst_index()\n");
+		return -EINVAL;
+	}
+	if (!tfo->release_cmd) {
+		pr_err("Missing tfo->release_cmd()\n");
+		return -EINVAL;
+	}
+	if (!tfo->shutdown_session) {
+		pr_err("Missing tfo->shutdown_session()\n");
+		return -EINVAL;
+	}
+	if (!tfo->close_session) {
+		pr_err("Missing tfo->close_session()\n");
+		return -EINVAL;
+	}
+	if (!tfo->sess_get_index) {
+		pr_err("Missing tfo->sess_get_index()\n");
+		return -EINVAL;
+	}
+	if (!tfo->write_pending) {
+		pr_err("Missing tfo->write_pending()\n");
+		return -EINVAL;
+	}
+	if (!tfo->write_pending_status) {
+		pr_err("Missing tfo->write_pending_status()\n");
+		return -EINVAL;
+	}
+	if (!tfo->set_default_node_attributes) {
+		pr_err("Missing tfo->set_default_node_attributes()\n");
+		return -EINVAL;
+	}
+	if (!tfo->get_cmd_state) {
+		pr_err("Missing tfo->get_cmd_state()\n");
+		return -EINVAL;
+	}
+	if (!tfo->queue_data_in) {
+		pr_err("Missing tfo->queue_data_in()\n");
+		return -EINVAL;
+	}
+	if (!tfo->queue_status) {
+		pr_err("Missing tfo->queue_status()\n");
+		return -EINVAL;
+	}
+	if (!tfo->queue_tm_rsp) {
+		pr_err("Missing tfo->queue_tm_rsp()\n");
+		return -EINVAL;
+	}
+	if (!tfo->aborted_task) {
+		pr_err("Missing tfo->aborted_task()\n");
+		return -EINVAL;
+	}
+	/*
+	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
+	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+	 * target_core_fabric_configfs.c WWN+TPG group context code.
+	 */
+	if (!tfo->fabric_make_wwn) {
+		pr_err("Missing tfo->fabric_make_wwn()\n");
+		return -EINVAL;
+	}
+	if (!tfo->fabric_drop_wwn) {
+		pr_err("Missing tfo->fabric_drop_wwn()\n");
+		return -EINVAL;
+	}
+	if (!tfo->fabric_make_tpg) {
+		pr_err("Missing tfo->fabric_make_tpg()\n");
+		return -EINVAL;
+	}
+	if (!tfo->fabric_drop_tpg) {
+		pr_err("Missing tfo->fabric_drop_tpg()\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
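+/*
+ * A fabric driver typically calls target_register_template() from its
+ * module_init() with a statically defined target_core_fabric_ops, and
+ * target_unregister_template() from module_exit().  A minimal sketch,
+ * with a hypothetical my_fabric_ops:
+ *
+ *	static int __init my_fabric_init(void)
+ *	{
+ *		return target_register_template(&my_fabric_ops);
+ *	}
+ *
+ *	static void __exit my_fabric_exit(void)
+ *	{
+ *		target_unregister_template(&my_fabric_ops);
+ *	}
+ */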
+int target_register_template(const struct target_core_fabric_ops *fo)
+{
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	ret = target_fabric_tf_ops_check(fo);
+	if (ret)
+		return ret;
+
+	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
+	if (!tf) {
+		pr_err("%s: could not allocate memory!\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&tf->tf_list);
+	atomic_set(&tf->tf_access_cnt, 0);
+	tf->tf_ops = fo;
+	target_fabric_setup_cits(tf);
+
+	mutex_lock(&g_tf_lock);
+	list_add_tail(&tf->tf_list, &g_tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(target_register_template);
+
+void target_unregister_template(const struct target_core_fabric_ops *fo)
+{
+	struct target_fabric_configfs *t;
+
+	mutex_lock(&g_tf_lock);
+	list_for_each_entry(t, &g_tf_list, tf_list) {
+		if (!strcmp(t->tf_ops->name, fo->name)) {
+			BUG_ON(atomic_read(&t->tf_access_cnt));
+			list_del(&t->tf_list);
+			mutex_unlock(&g_tf_lock);
+			/*
+			 * Wait for any outstanding fabric se_deve_entry->rcu_head
+			 * callbacks to complete post kfree_rcu(), before allowing
+			 * fabric driver unload of TFO->module to proceed.
+			 */
+			rcu_barrier();
+			kfree(t);
+			return;
+		}
+	}
+	mutex_unlock(&g_tf_lock);
+}
+EXPORT_SYMBOL(target_unregister_template);
+
+/*##############################################################################
+// Stop functions called by external Target Fabrics Modules
+//############################################################################*/
+
+static inline struct se_dev_attrib *to_attrib(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_dev_attrib,
+			da_group);
+}
+
+/* Start functions for struct config_item_type tb_dev_attrib_cit */
+#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
+static ssize_t _name##_show(struct config_item *item, char *page)	\
+{									\
+	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
+}
+
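+/*
+ * For example, DEF_CONFIGFS_ATTRIB_SHOW(block_size) below expands to:
+ *
+ *	static ssize_t block_size_show(struct config_item *item, char *page)
+ *	{
+ *		return snprintf(page, PAGE_SIZE, "%u\n",
+ *				to_attrib(item)->block_size);
+ *	}
+ *
+ * i.e. one trivial _show() routine per struct se_dev_attrib field below.
+ */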
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
+DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
+DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
+DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_format);
+DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
+DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
+DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
+DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
+DEF_CONFIGFS_ATTRIB_SHOW(block_size);
+DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
+DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
+DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
+DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
+DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
+DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
+DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
+DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
+DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
+
+#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
+static ssize_t _name##_store(struct config_item *item, const char *page,\
+		size_t count)						\
+{									\
+	struct se_dev_attrib *da = to_attrib(item);			\
+	u32 val;							\
+	int ret;							\
+									\
+	ret = kstrtou32(page, 0, &val);					\
+	if (ret < 0)							\
+		return ret;						\
+	da->_name = val;						\
+	return count;							\
+}
+
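+/*
+ * The resulting _store() routines back writable attrib files, e.g.
+ * (illustrative backstore path):
+ *
+ *	echo 1024 > /sys/kernel/config/target/core/$HBA/$DEV/attrib/max_write_same_len
+ */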
+DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
+DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
+DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
+DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
+DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
+
+#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
+static ssize_t _name##_store(struct config_item *item, const char *page,	\
+		size_t count)						\
+{									\
+	struct se_dev_attrib *da = to_attrib(item);			\
+	bool flag;							\
+	int ret;							\
+									\
+	ret = strtobool(page, &flag);					\
+	if (ret < 0)							\
+		return ret;						\
+	da->_name = flag;						\
+	return count;							\
+}
+
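+/*
+ * strtobool() takes "1"/"y"/"Y" as true and "0"/"n"/"N" as false, so the
+ * boolean attribs below accept the usual "echo 1" and "echo y" idioms.
+ */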
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
+
+#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
+static ssize_t _name##_store(struct config_item *item, const char *page,\
+		size_t count)						\
+{									\
+	printk_once(KERN_WARNING					\
+		"ignoring deprecated %s attribute\n",			\
+		__stringify(_name));					\
+	return count;							\
+}
+
+DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
+DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
+
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+	const char *configname;
+
+	configname = config_item_name(&dev->dev_group.cg_item);
+	if (strlen(configname) >= 16) {
+		pr_warn("dev[%p]: Backstore name '%s' is too long for "
+			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+			configname);
+	}
+	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+}
+
+static ssize_t emulate_model_alias_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	struct se_device *dev = da->da_dev;
+	bool flag;
+	int ret;
+
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to change model alias"
+			" while export_count is %d\n",
+			dev, dev->export_count);
+		return -EINVAL;
+	}
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	if (flag) {
+		dev_set_t10_wwn_model_alias(dev);
+	} else {
+		strncpy(&dev->t10_wwn.model[0],
+			dev->transport->inquiry_prod, 16);
+	}
+	da->emulate_model_alias = flag;
+	return count;
+}
+
+static ssize_t emulate_write_cache_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	bool flag;
+	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	if (flag && da->da_dev->transport->get_write_cache) {
+		pr_err("emulate_write_cache not supported for this device\n");
+		return -EINVAL;
+	}
+
+	da->emulate_write_cache = flag;
+	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+			da->da_dev, flag);
+	return count;
+}
+
+static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	u32 val;
+	int ret;
+
+	ret = kstrtou32(page, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val != 0 && val != 1 && val != 2) {
+		pr_err("Illegal value %d\n", val);
+		return -EINVAL;
+	}
+
+	if (da->da_dev->export_count) {
+		pr_err("dev[%p]: Unable to change SE Device"
+			" UA_INTRLCK_CTRL while export_count is %d\n",
+			da->da_dev, da->da_dev->export_count);
+		return -EINVAL;
+	}
+	da->emulate_ua_intlck_ctrl = val;
+	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+		da->da_dev, val);
+	return count;
+}
+
+static ssize_t emulate_tas_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	bool flag;
+	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	if (da->da_dev->export_count) {
+		pr_err("dev[%p]: Unable to change SE Device TAS while"
+			" export_count is %d\n",
+			da->da_dev, da->da_dev->export_count);
+		return -EINVAL;
+	}
+	da->emulate_tas = flag;
+	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+		da->da_dev, flag ? "Enabled" : "Disabled");
+
+	return count;
+}
+
+static ssize_t emulate_tpu_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	bool flag;
+	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (flag && !da->max_unmap_block_desc_count) {
+		pr_err("Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	da->emulate_tpu = flag;
+	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+		da->da_dev, flag);
+	return count;
+}
+
+static ssize_t emulate_tpws_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	bool flag;
+	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (flag && !da->max_unmap_block_desc_count) {
+		pr_err("Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	da->emulate_tpws = flag;
+	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+				da->da_dev, flag);
+	return count;
+}
+
+static ssize_t pi_prot_type_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	int old_prot = da->pi_prot_type, ret;
+	struct se_device *dev = da->da_dev;
+	u32 flag;
+
+	ret = kstrtou32(page, 0, &flag);
+	if (ret < 0)
+		return ret;
+
+	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+		pr_err("Illegal value %d for pi_prot_type\n", flag);
+		return -EINVAL;
+	}
+	if (flag == 2) {
+		pr_err("DIF TYPE2 protection currently not supported\n");
+		return -ENOSYS;
+	}
+	if (da->hw_pi_prot_type) {
+		pr_warn("DIF protection enabled on underlying hardware,"
+			" ignoring\n");
+		return count;
+	}
+	if (!dev->transport->init_prot || !dev->transport->free_prot) {
+		/* 0 is only allowed value for non-supporting backends */
+		if (flag == 0)
+			return count;
+
+		pr_err("DIF protection not supported by backend: %s\n",
+		       dev->transport->name);
+		return -ENOSYS;
+	}
+	if (!(dev->dev_flags & DF_CONFIGURED)) {
+		pr_err("DIF protection requires device to be configured\n");
+		return -ENODEV;
+	}
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to change SE Device PROT type while"
+		       " export_count is %d\n", dev, dev->export_count);
+		return -EINVAL;
+	}
+
+	da->pi_prot_type = flag;
+
+	if (flag && !old_prot) {
+		ret = dev->transport->init_prot(dev);
+		if (ret) {
+			da->pi_prot_type = old_prot;
+			return ret;
+		}
+
+	} else if (!flag && old_prot) {
+		dev->transport->free_prot(dev);
+	}
+
+	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+	return count;
+}
+
+static ssize_t pi_prot_format_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	struct se_device *dev = da->da_dev;
+	bool flag;
+	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	if (!flag)
+		return count;
+
+	if (!dev->transport->format_prot) {
+		pr_err("DIF protection format not supported by backend %s\n",
+		       dev->transport->name);
+		return -ENOSYS;
+	}
+	if (!(dev->dev_flags & DF_CONFIGURED)) {
+		pr_err("DIF protection format requires device to be configured\n");
+		return -ENODEV;
+	}
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to format SE Device PROT type while"
+		       " export_count is %d\n", dev, dev->export_count);
+		return -EINVAL;
+	}
+
+	ret = dev->transport->format_prot(dev);
+	if (ret)
+		return ret;
+
+	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+	return count;
+}
+
+static ssize_t force_pr_aptpl_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	bool flag;
+	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+	if (da->da_dev->export_count) {
+		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
+		       " export_count is %d\n",
+		       da->da_dev, da->da_dev->export_count);
+		return -EINVAL;
+	}
+
+	da->force_pr_aptpl = flag;
+	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
+	return count;
+}
+
+static ssize_t emulate_rest_reord_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	bool flag;
+	int ret;
+
+	ret = strtobool(page, &flag);
+	if (ret < 0)
+		return ret;
+
+	if (flag != 0) {
+		pr_err("dev[%p]: SE Device emulation of restricted"
+			" reordering not implemented\n", da->da_dev);
+		return -ENOSYS;
+	}
+	da->emulate_rest_reord = flag;
+	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
+		da->da_dev, flag);
+	return count;
+}
+
+/*
+ * Note: this can only be called on an unexported SE Device Object.
+ */
+static ssize_t queue_depth_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	struct se_device *dev = da->da_dev;
+	u32 val;
+	int ret;
+
+	ret = kstrtou32(page, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to change SE Device TCQ while"
+			" export_count is %d\n",
+			dev, dev->export_count);
+		return -EINVAL;
+	}
+	if (!val) {
+		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
+		return -EINVAL;
+	}
+
+	if (val > dev->dev_attrib.queue_depth) {
+		if (val > dev->dev_attrib.hw_queue_depth) {
+			pr_err("dev[%p]: Passed queue_depth:"
+				" %u exceeds TCM/SE_Device MAX"
+				" TCQ: %u\n", dev, val,
+				dev->dev_attrib.hw_queue_depth);
+			return -EINVAL;
+		}
+	}
+	da->queue_depth = dev->queue_depth = val;
+	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
+	return count;
+}
+
+static ssize_t optimal_sectors_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	u32 val;
+	int ret;
+
+	ret = kstrtou32(page, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (da->da_dev->export_count) {
+		pr_err("dev[%p]: Unable to change SE Device"
+			" optimal_sectors while export_count is %d\n",
+			da->da_dev, da->da_dev->export_count);
+		return -EINVAL;
+	}
+	if (val > da->hw_max_sectors) {
+		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
+			" greater than hw_max_sectors: %u\n",
+			da->da_dev, val, da->hw_max_sectors);
+		return -EINVAL;
+	}
+
+	da->optimal_sectors = val;
+	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
+			da->da_dev, val);
+	return count;
+}
+
+static ssize_t block_size_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	u32 val;
+	int ret;
+
+	ret = kstrtou32(page, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (da->da_dev->export_count) {
+		pr_err("dev[%p]: Unable to change SE Device block_size"
+			" while export_count is %d\n",
+			da->da_dev, da->da_dev->export_count);
+		return -EINVAL;
+	}
+
+	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
+		pr_err("dev[%p]: Illegal value for block_device: %u"
+			" for SE device, must be 512, 1024, 2048 or 4096\n",
+			da->da_dev, val);
+		return -EINVAL;
+	}
+
+	da->block_size = val;
+	if (da->max_bytes_per_io)
+		da->hw_max_sectors = da->max_bytes_per_io / val;
+
+	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
+			da->da_dev, val);
+	return count;
+}
+
+CONFIGFS_ATTR(, emulate_model_alias);
+CONFIGFS_ATTR(, emulate_dpo);
+CONFIGFS_ATTR(, emulate_fua_write);
+CONFIGFS_ATTR(, emulate_fua_read);
+CONFIGFS_ATTR(, emulate_write_cache);
+CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
+CONFIGFS_ATTR(, emulate_tas);
+CONFIGFS_ATTR(, emulate_tpu);
+CONFIGFS_ATTR(, emulate_tpws);
+CONFIGFS_ATTR(, emulate_caw);
+CONFIGFS_ATTR(, emulate_3pc);
+CONFIGFS_ATTR(, pi_prot_type);
+CONFIGFS_ATTR_RO(, hw_pi_prot_type);
+CONFIGFS_ATTR(, pi_prot_format);
+CONFIGFS_ATTR(, enforce_pr_isids);
+CONFIGFS_ATTR(, is_nonrot);
+CONFIGFS_ATTR(, emulate_rest_reord);
+CONFIGFS_ATTR(, force_pr_aptpl);
+CONFIGFS_ATTR_RO(, hw_block_size);
+CONFIGFS_ATTR(, block_size);
+CONFIGFS_ATTR_RO(, hw_max_sectors);
+CONFIGFS_ATTR(, optimal_sectors);
+CONFIGFS_ATTR_RO(, hw_queue_depth);
+CONFIGFS_ATTR(, queue_depth);
+CONFIGFS_ATTR(, max_unmap_lba_count);
+CONFIGFS_ATTR(, max_unmap_block_desc_count);
+CONFIGFS_ATTR(, unmap_granularity);
+CONFIGFS_ATTR(, unmap_granularity_alignment);
+CONFIGFS_ATTR(, max_write_same_len);
+
+/*
+ * dev_attrib attributes for devices using the target core SBC/SPC
+ * interpreter.  Any backend using spc_parse_cdb should be using
+ * these.
+ */
+struct configfs_attribute *sbc_attrib_attrs[] = {
+	&attr_emulate_model_alias,
+	&attr_emulate_dpo,
+	&attr_emulate_fua_write,
+	&attr_emulate_fua_read,
+	&attr_emulate_write_cache,
+	&attr_emulate_ua_intlck_ctrl,
+	&attr_emulate_tas,
+	&attr_emulate_tpu,
+	&attr_emulate_tpws,
+	&attr_emulate_caw,
+	&attr_emulate_3pc,
+	&attr_pi_prot_type,
+	&attr_hw_pi_prot_type,
+	&attr_pi_prot_format,
+	&attr_enforce_pr_isids,
+	&attr_is_nonrot,
+	&attr_emulate_rest_reord,
+	&attr_force_pr_aptpl,
+	&attr_hw_block_size,
+	&attr_block_size,
+	&attr_hw_max_sectors,
+	&attr_optimal_sectors,
+	&attr_hw_queue_depth,
+	&attr_queue_depth,
+	&attr_max_unmap_lba_count,
+	&attr_max_unmap_block_desc_count,
+	&attr_unmap_granularity,
+	&attr_unmap_granularity_alignment,
+	&attr_max_write_same_len,
+	NULL,
+};
+EXPORT_SYMBOL(sbc_attrib_attrs);
+
+/*
+ * Minimal dev_attrib attributes for devices passing through CDBs.
+ * In this case we only provide a few read-only attributes for
+ * backwards compatibility.
+ */
+struct configfs_attribute *passthrough_attrib_attrs[] = {
+	&attr_hw_pi_prot_type,
+	&attr_hw_block_size,
+	&attr_hw_max_sectors,
+	&attr_hw_queue_depth,
+	NULL,
+};
+EXPORT_SYMBOL(passthrough_attrib_attrs);
+
+TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
+
+/* End functions for struct config_item_type tb_dev_attrib_cit */
+
+/*  Start functions for struct config_item_type tb_dev_wwn_cit */
+
+static struct t10_wwn *to_t10_wwn(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
+}
+
+/*
+ * VPD page 0x80 Unit serial
+ */
+static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
+		&to_t10_wwn(item)->unit_serial[0]);
+}
+
+static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct t10_wwn *t10_wwn = to_t10_wwn(item);
+	struct se_device *dev = t10_wwn->t10_dev;
+	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+
+	/*
+	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
+	 * from the struct scsi_device level firmware, do not allow
+	 * VPD Unit Serial to be emulated.
+	 *
+	 * Note this struct scsi_device could also be emulating VPD
+	 * information from its drivers/scsi LLD.  But for now we assume
+	 * it is doing 'the right thing' wrt a world wide unique
+	 * VPD Unit Serial Number that OS dependent multipath can depend on.
+	 */
+	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
+		pr_err("Underlying SCSI device firmware provided VPD"
+			" Unit Serial, ignoring request\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
+		pr_err("Emulated VPD Unit Serial exceeds"
+		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
+		return -EOVERFLOW;
+	}
+	/*
+	 * Check to see if any active $FABRIC_MOD exports exist.  If they
+	 * do exist, fail here as changing this information on the fly
+	 * (underneath the initiator side OS dependent multipath code)
+	 * could cause negative effects.
+	 */
+	if (dev->export_count) {
+		pr_err("Unable to set VPD Unit Serial while"
+			" active %d $FABRIC_MOD exports exist\n",
+			dev->export_count);
+		return -EINVAL;
+	}
+
+	/*
+	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
+	 *
+	 * Also, strip any newline added from the userspace
+	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
+	 */
+	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
+	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
+	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+			"%s", strstrip(buf));
+	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
+
+	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+			" %s\n", dev->t10_wwn.unit_serial);
+
+	return count;
+}
+
+/*
+ * VPD page 0x83 Protocol Identifier
+ */
+static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
+		char *page)
+{
+	struct t10_wwn *t10_wwn = to_t10_wwn(item);
+	struct t10_vpd *vpd;
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	ssize_t len = 0;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	spin_lock(&t10_wwn->t10_vpd_lock);
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
+		if (!vpd->protocol_identifier_set)
+			continue;
+
+		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
+
+		if (len + strlen(buf) >= PAGE_SIZE)
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+	}
+	spin_unlock(&t10_wwn->t10_vpd_lock);
+
+	return len;
+}
+
+/*
+ * Generic wrapper for dumping VPD identifiers by association.
+ */
+#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
+static ssize_t target_wwn_##_name##_show(struct config_item *item,	\
+		char *page)						\
+{									\
+	struct t10_wwn *t10_wwn = to_t10_wwn(item);			\
+	struct t10_vpd *vpd;						\
+	unsigned char buf[VPD_TMP_BUF_SIZE];				\
+	ssize_t len = 0;						\
+									\
+	spin_lock(&t10_wwn->t10_vpd_lock);				\
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
+		if (vpd->association != _assoc)				\
+			continue;					\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
+		if (len + strlen(buf) >= PAGE_SIZE)			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if (len + strlen(buf) >= PAGE_SIZE)			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if (len + strlen(buf) >= PAGE_SIZE)			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+	}								\
+	spin_unlock(&t10_wwn->t10_vpd_lock);				\
+									\
+	return len;							\
+}
+
+/* VPD page 0x83 Association: Logical Unit */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
+/* VPD page 0x83 Association: Target Port */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
+/* VPD page 0x83 Association: SCSI Target Device */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
+
+CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
+CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
+CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
+CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
+CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
+
+static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+	&target_wwn_attr_vpd_unit_serial,
+	&target_wwn_attr_vpd_protocol_identifier,
+	&target_wwn_attr_vpd_assoc_logical_unit,
+	&target_wwn_attr_vpd_assoc_target_port,
+	&target_wwn_attr_vpd_assoc_scsi_target_device,
+	NULL,
+};
+
+TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
+
+/*  End functions for struct config_item_type tb_dev_wwn_cit */
+
+/*  Start functions for struct config_item_type tb_dev_pr_cit */
+
+static struct se_device *pr_to_dev(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_device,
+			dev_pr_group);
+}
+
+static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
+		char *page)
+{
+	struct se_node_acl *se_nacl;
+	struct t10_pr_registration *pr_reg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	pr_reg = dev->dev_pr_res_holder;
+	if (!pr_reg)
+		return sprintf(page, "No SPC-3 Reservation holder\n");
+
+	se_nacl = pr_reg->pr_reg_nacl;
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+
+	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
+		se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_nacl->initiatorname, i_buf);
+}
+
+static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
+		char *page)
+{
+	struct se_node_acl *se_nacl;
+	ssize_t len;
+
+	se_nacl = dev->dev_reserved_node_acl;
+	if (se_nacl) {
+		len = sprintf(page,
+			      "SPC-2 Reservation: %s Initiator: %s\n",
+			      se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+			      se_nacl->initiatorname);
+	} else {
+		len = sprintf(page, "No SPC-2 Reservation holder\n");
+	}
+	return len;
+}
+
+static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = pr_to_dev(item);
+	int ret;
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return sprintf(page, "Passthrough\n");
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+		ret = target_core_dev_pr_show_spc2_res(dev, page);
+	else
+		ret = target_core_dev_pr_show_spc3_res(dev, page);
+	spin_unlock(&dev->dev_reservation_lock);
+	return ret;
+}
+
+static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = pr_to_dev(item);
+	ssize_t len = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_pr_res_holder) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
+		len = sprintf(page, "SPC-3 Reservation: All Target"
+			" Ports registration\n");
+	} else {
+		len = sprintf(page, "SPC-3 Reservation: Single"
+			" Target Port registration\n");
+	}
+
+	spin_unlock(&dev->dev_reservation_lock);
+	return len;
+}
+
+static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
+}
+
+static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = pr_to_dev(item);
+	struct se_node_acl *se_nacl;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg;
+	const struct target_core_fabric_ops *tfo;
+	ssize_t len = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!pr_reg) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		goto out_unlock;
+	}
+
+	se_nacl = pr_reg->pr_reg_nacl;
+	se_tpg = se_nacl->se_tpg;
+	tfo = se_tpg->se_tpg_tfo;
+
+	len += sprintf(page+len, "SPC-3 Reservation: %s"
+		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+		tfo->tpg_get_wwn(se_tpg));
+	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
+		" Identifier Tag: %hu %s Portal Group Tag: %hu"
+		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
+		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
+		tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);
+
+out_unlock:
+	spin_unlock(&dev->dev_reservation_lock);
+	return len;
+}
+
+static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = pr_to_dev(item);
+	const struct target_core_fabric_ops *tfo;
+	struct t10_pr_registration *pr_reg;
+	unsigned char buf[384];
+	char i_buf[PR_REG_ISID_ID_LEN];
+	ssize_t len = 0;
+	int reg_count = 0;
+
+	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
+
+	spin_lock(&dev->t10_pr.registration_lock);
+	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
+			pr_reg_list) {
+
+		memset(buf, 0, 384);
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+		core_pr_dump_initiator_port(pr_reg, i_buf,
+					PR_REG_ISID_ID_LEN);
+		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
+			tfo->get_fabric_name(),
+			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
+			pr_reg->pr_res_generation);
+
+		if (len + strlen(buf) >= PAGE_SIZE)
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+		reg_count++;
+	}
+	spin_unlock(&dev->t10_pr.registration_lock);
+
+	if (!reg_count)
+		len += sprintf(page+len, "None\n");
+
+	return len;
+}
+
+static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = pr_to_dev(item);
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (pr_reg) {
+		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+	} else {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+	}
+
+	spin_unlock(&dev->dev_reservation_lock);
+	return len;
+}
+
+static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = pr_to_dev(item);
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return sprintf(page, "SPC_PASSTHROUGH\n");
+	else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+		return sprintf(page, "SPC2_RESERVATIONS\n");
+	else
+		return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+}
+
+static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = pr_to_dev(item);
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return 0;
+
+	return sprintf(page, "APTPL Bit Status: %s\n",
+		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
+}
+
+static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = pr_to_dev(item);
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return 0;
+
+	return sprintf(page, "Ready to process PR APTPL metadata..\n");
+}
+
+enum {
+	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
+	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
+	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
+	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_initiator_fabric, "initiator_fabric=%s"},
+	{Opt_initiator_node, "initiator_node=%s"},
+	{Opt_initiator_sid, "initiator_sid=%s"},
+	{Opt_sa_res_key, "sa_res_key=%s"},
+	{Opt_res_holder, "res_holder=%d"},
+	{Opt_res_type, "res_type=%d"},
+	{Opt_res_scope, "res_scope=%d"},
+	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
+	{Opt_mapped_lun, "mapped_lun=%lld"},
+	{Opt_target_fabric, "target_fabric=%s"},
+	{Opt_target_node, "target_node=%s"},
+	{Opt_tpgt, "tpgt=%d"},
+	{Opt_port_rtpi, "port_rtpi=%d"},
+	{Opt_target_lun, "target_lun=%lld"},
+	{Opt_err, NULL}
+};
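+
+/*
+ * A stored APTPL metadata blob is a comma/newline separated list of the
+ * key=value tokens above, e.g. (illustrative iSCSI names):
+ *
+ *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
+ *	initiator_sid=10,sa_res_key=0x12345678,res_holder=1,res_type=3,
+ *	mapped_lun=0,target_fabric=iSCSI,
+ *	target_node=iqn.2003-01.org.linux-iscsi.tgt1,tpgt=1,target_lun=0
+ *
+ * initiator_node=, target_node= and a non-zero sa_res_key= are mandatory,
+ * and res_type= must be set whenever res_holder=1.
+ */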
+
+static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_device *dev = pr_to_dev(item);
+	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
+	unsigned char *t_fabric = NULL, *t_port = NULL;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	unsigned long long tmp_ll;
+	u64 sa_res_key = 0;
+	u64 mapped_lun = 0, target_lun = 0;
+	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
+	u16 tpgt = 0;
+	u8 type = 0;
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return count;
+	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+		return count;
+
+	if (dev->export_count) {
+		pr_debug("Unable to process APTPL metadata while"
+			" active fabric exports exist\n");
+		return -EINVAL;
+	}
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_initiator_fabric:
+			i_fabric = match_strdup(args);
+			if (!i_fabric) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			break;
+		case Opt_initiator_node:
+			i_port = match_strdup(args);
+			if (!i_port) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
+				pr_err("APTPL metadata initiator_node="
+					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
+					PR_APTPL_MAX_IPORT_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_initiator_sid:
+			isid = match_strdup(args);
+			if (!isid) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (strlen(isid) >= PR_REG_ISID_LEN) {
+				pr_err("APTPL metadata initiator_isid"
+					"= exceeds PR_REG_ISID_LEN: %d\n",
+					PR_REG_ISID_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_sa_res_key:
+			ret = kstrtoull(args->from, 0, &tmp_ll);
+			if (ret < 0) {
+				pr_err("kstrtoull() failed for sa_res_key=\n");
+				goto out;
+			}
+			sa_res_key = (u64)tmp_ll;
+			break;
+		/*
+		 * PR APTPL Metadata for Reservation
+		 */
+		case Opt_res_holder:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			res_holder = arg;
+			break;
+		case Opt_res_type:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			type = (u8)arg;
+			break;
+		case Opt_res_scope:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			break;
+		case Opt_res_all_tg_pt:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			all_tg_pt = (int)arg;
+			break;
+		case Opt_mapped_lun:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			mapped_lun = (u64)arg;
+			break;
+		/*
+		 * PR APTPL Metadata for Target Port
+		 */
+		case Opt_target_fabric:
+			t_fabric = match_strdup(args);
+			if (!t_fabric) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			break;
+		case Opt_target_node:
+			t_port = match_strdup(args);
+			if (!t_port) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
+				pr_err("APTPL metadata target_node="
+					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
+					PR_APTPL_MAX_TPORT_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_tpgt:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			tpgt = (u16)arg;
+			break;
+		case Opt_port_rtpi:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			break;
+		case Opt_target_lun:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			target_lun = (u64)arg;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!i_port || !t_port || !sa_res_key) {
+		pr_err("Illegal parameters for APTPL registration\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (res_holder && !(type)) {
+		pr_err("Illegal PR type: 0x%02x for reservation"
+				" holder\n", type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
+			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
+			res_holder, all_tg_pt, type);
+out:
+	kfree(i_fabric);
+	kfree(i_port);
+	kfree(isid);
+	kfree(t_fabric);
+	kfree(t_port);
+	kfree(orig);
+	return (ret == 0) ? count : ret;
+}
+
+CONFIGFS_ATTR_RO(target_pr_, res_holder);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
+CONFIGFS_ATTR_RO(target_pr_, res_type);
+CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
+CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
+
+static struct configfs_attribute *target_core_dev_pr_attrs[] = {
+	&target_pr_attr_res_holder,
+	&target_pr_attr_res_pr_all_tgt_pts,
+	&target_pr_attr_res_pr_generation,
+	&target_pr_attr_res_pr_holder_tg_port,
+	&target_pr_attr_res_pr_registered_i_pts,
+	&target_pr_attr_res_pr_type,
+	&target_pr_attr_res_type,
+	&target_pr_attr_res_aptpl_active,
+	&target_pr_attr_res_aptpl_metadata,
+	NULL,
+};
+
+TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
+
+/*  End functions for struct config_item_type tb_dev_pr_cit */
+
+/*  Start functions for struct config_item_type tb_dev_cit */
+
+static inline struct se_device *to_device(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_device, dev_group);
+}
+
+static ssize_t target_dev_info_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_device(item);
+	int bl = 0;
+	ssize_t read_bytes = 0;
+
+	transport_dump_dev_state(dev, page, &bl);
+	read_bytes += bl;
+	read_bytes += dev->transport->show_configfs_dev_params(dev,
+			page+read_bytes);
+	return read_bytes;
+}
+
+static ssize_t target_dev_control_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_device *dev = to_device(item);
+
+	return dev->transport->set_configfs_dev_params(dev, page, count);
+}
+
+static ssize_t target_dev_alias_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_device(item);
+
+	if (!(dev->dev_flags & DF_USING_ALIAS))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
+}
+
+static ssize_t target_dev_alias_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_device *dev = to_device(item);
+	struct se_hba *hba = dev->se_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_DEV_ALIAS_LEN-1)) {
+		pr_err("alias count: %d exceeds"
+			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
+			SE_DEV_ALIAS_LEN-1);
+		return -EINVAL;
+	}
+
+	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
+	if (!read_bytes)
+		return -EINVAL;
+	if (dev->dev_alias[read_bytes - 1] == '\n')
+		dev->dev_alias[read_bytes - 1] = '\0';
+
+	dev->dev_flags |= DF_USING_ALIAS;
+
+	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&dev->dev_group.cg_item),
+		dev->dev_alias);
+
+	return read_bytes;
+}
+
+static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_device(item);
+
+	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
+}
+
+static ssize_t target_dev_udev_path_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_device *dev = to_device(item);
+	struct se_hba *hba = dev->se_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_UDEV_PATH_LEN-1)) {
+		pr_err("udev_path count: %d exceeds"
+			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
+			SE_UDEV_PATH_LEN-1);
+		return -EINVAL;
+	}
+
+	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
+			"%s", page);
+	if (!read_bytes)
+		return -EINVAL;
+	if (dev->udev_path[read_bytes - 1] == '\n')
+		dev->udev_path[read_bytes - 1] = '\0';
+
+	dev->dev_flags |= DF_USING_UDEV_PATH;
+
+	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&dev->dev_group.cg_item),
+		dev->udev_path);
+
+	return read_bytes;
+}
+
+static ssize_t target_dev_enable_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_device(item);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+}
+
+static ssize_t target_dev_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_device *dev = to_device(item);
+	char *ptr;
+	int ret;
+
+	ptr = strstr(page, "1");
+	if (!ptr) {
+		pr_err("For dev_enable ops, only valid value"
+				" is \"1\"\n");
+		return -EINVAL;
+	}
+
+	ret = target_configure_device(dev);
+	if (ret)
+		return ret;
+	return count;
+}
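+
+/*
+ * Userspace (targetcli, for example) writes this last, after the control
+ * and attrib settings are in place:
+ *
+ *	echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
+ *
+ * Only a write containing "1" is accepted, and a configured device cannot
+ * be disabled again through this file.
+ */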
+
+static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_device(item);
+	struct config_item *lu_ci;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!lu_gp_mem)
+		return 0;
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if (lu_gp) {
+		lu_ci = &lu_gp->lu_gp_group.cg_item;
+		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
+			config_item_name(lu_ci), lu_gp->lu_gp_id);
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	return len;
+}
+
+static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_device *dev = to_device(item);
+	struct se_hba *hba = dev->se_hba;
+	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+	int move = 0;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!lu_gp_mem)
+		return count;
+
+	if (count > LU_GROUP_NAME_BUF) {
+		pr_err("ALUA LU Group Alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA logical unit alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_lu_gp_by_name() will increment the reference
+		 * to struct t10_alua_lu_gp.  This reference is released with
+		 * core_alua_put_lu_gp_from_name() below.
+		 */
+		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
+		if (!lu_gp_new)
+			return -ENODEV;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if (lu_gp) {
+		/*
+		 * Clearing an existing lu_gp association, and replacing
+		 * with NULL
+		 */
+		if (!lu_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
+				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
+				" %hu\n",
+				config_item_name(&hba->hba_group.cg_item),
+				config_item_name(&dev->dev_group.cg_item),
+				config_item_name(&lu_gp->lu_gp_group.cg_item),
+				lu_gp->lu_gp_id);
+
+			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of lu_gp_mem with lu_gp
+		 */
+		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+		move = 1;
+	}
+	/*
+	 * Associate lu_gp_mem with lu_gp_new.
+	 */
+	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+		" core/alua/lu_gps/%s, ID: %hu\n",
+		(move) ? "Moving" : "Adding",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&dev->dev_group.cg_item),
+		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
+		lu_gp_new->lu_gp_id);
+
+	core_alua_put_lu_gp_from_name(lu_gp_new);
+	return count;
+}
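+
+/*
+ * Example usage, with an illustrative group "my_lu_gp" that must already
+ * exist under core/alua/lu_gps/:
+ *
+ *	echo my_lu_gp > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
+ *	echo NULL > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
+ *
+ * The first write joins the group (moving the device if it was already in
+ * another group), the second clears any explicit LU group association.
+ */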
+
+static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_device(item);
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *mem;
+	char *b = page;
+	int bl = 0;
+	char state;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (!list_empty(&dev->t10_alua.lba_map_list))
+		bl += sprintf(b + bl, "%u %u\n",
+			      dev->t10_alua.lba_map_segment_size,
+			      dev->t10_alua.lba_map_segment_multiplier);
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
+		bl += sprintf(b + bl, "%llu %llu",
+			      map->lba_map_first_lba, map->lba_map_last_lba);
+		list_for_each_entry(mem, &map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			switch (mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+				state = 'O';
+				break;
+			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+				state = 'A';
+				break;
+			case ALUA_ACCESS_STATE_STANDBY:
+				state = 'S';
+				break;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				state = 'U';
+				break;
+			default:
+				state = '.';
+				break;
+			}
+			bl += sprintf(b + bl, " %d:%c",
+				      mem->lba_map_mem_alua_pg_id, state);
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return bl;
+}
+
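+/*
+ * The accepted lba_map layout mirrors what the _show() above prints: a
+ * header of "segment_size segment_multiplier", then one line per LBA range
+ * of "first_lba last_lba" followed by pg_id:state pairs, where state is
+ * O/A/S/U (active-optimized, active/non-optimized, standby, unavailable).
+ * Illustrative input:
+ *
+ *	4096 1
+ *	0 2047 0:O 1:A
+ *	2048 4095 0:A 1:O
+ */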
+static ssize_t target_dev_lba_map_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_device *dev = to_device(item);
+	struct t10_alua_lba_map *lba_map = NULL;
+	struct list_head lba_list;
+	char *map_entries, *ptr;
+	char state;
+	int pg_num = -1, pg;
+	int ret = 0, num = 0, pg_id, alua_state;
+	unsigned long start_lba = -1, end_lba = -1;
+	unsigned long segment_size = -1, segment_mult = -1;
+
+	map_entries = kstrdup(page, GFP_KERNEL);
+	if (!map_entries)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&lba_list);
+	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		if (num == 0) {
+			if (sscanf(ptr, "%lu %lu\n",
+				   &segment_size, &segment_mult) != 2) {
+				pr_err("Invalid line %d\n", num);
+				ret = -EINVAL;
+				break;
+			}
+			num++;
+			continue;
+		}
+		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
+			pr_err("Invalid line %d\n", num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr = strchr(ptr, ' ');
+		if (!ptr) {
+			pr_err("Invalid line %d, missing end lba\n", num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr++;
+		ptr = strchr(ptr, ' ');
+		if (!ptr) {
+			pr_err("Invalid line %d, missing state definitions\n",
+			       num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr++;
+		lba_map = core_alua_allocate_lba_map(&lba_list,
+						     start_lba, end_lba);
+		if (IS_ERR(lba_map)) {
+			ret = PTR_ERR(lba_map);
+			break;
+		}
+		pg = 0;
+		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
+			switch (state) {
+			case 'O':
+				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
+				break;
+			case 'A':
+				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
+				break;
+			case 'S':
+				alua_state = ALUA_ACCESS_STATE_STANDBY;
+				break;
+			case 'U':
+				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
+				break;
+			default:
+				pr_err("Invalid ALUA state '%c'\n", state);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			ret = core_alua_allocate_lba_map_mem(lba_map,
+							     pg_id, alua_state);
+			if (ret) {
+				pr_err("Invalid target descriptor %d:%c "
+				       "at line %d\n",
+				       pg_id, state, num);
+				break;
+			}
+			pg++;
+			ptr = strchr(ptr, ' ');
+			if (ptr)
+				ptr++;
+			else
+				break;
+		}
+		if (pg_num == -1) {
+			pg_num = pg;
+		} else if (pg != pg_num) {
+			pr_err("Only %d from %d port group definitions "
+			       "at line %d\n", pg, pg_num, num);
+			ret = -EINVAL;
+			break;
+		}
+		num++;
+	}
+out:
+	if (ret) {
+		core_alua_free_lba_map(&lba_list);
+		count = ret;
+	} else
+		core_alua_set_lba_map(dev, &lba_list,
+				      segment_size, segment_mult);
+	kfree(map_entries);
+	return count;
+}
+
+CONFIGFS_ATTR_RO(target_dev_, info);
+CONFIGFS_ATTR_WO(target_dev_, control);
+CONFIGFS_ATTR(target_dev_, alias);
+CONFIGFS_ATTR(target_dev_, udev_path);
+CONFIGFS_ATTR(target_dev_, enable);
+CONFIGFS_ATTR(target_dev_, alua_lu_gp);
+CONFIGFS_ATTR(target_dev_, lba_map);
+
+static struct configfs_attribute *target_core_dev_attrs[] = {
+	&target_dev_attr_info,
+	&target_dev_attr_control,
+	&target_dev_attr_alias,
+	&target_dev_attr_udev_path,
+	&target_dev_attr_enable,
+	&target_dev_attr_alua_lu_gp,
+	&target_dev_attr_lba_map,
+	NULL,
+};
+
+static void target_core_dev_release(struct config_item *item)
+{
+	struct config_group *dev_cg = to_config_group(item);
+	struct se_device *dev =
+		container_of(dev_cg, struct se_device, dev_group);
+
+	kfree(dev_cg->default_groups);
+	target_free_device(dev);
+}
+
+static struct configfs_item_operations target_core_dev_item_ops = {
+	.release		= target_core_dev_release,
+};
+
+TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
+
+/* End functions for struct config_item_type tb_dev_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct t10_alua_lu_gp,
+			lu_gp_group);
+}
+
+static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
+{
+	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
+
+	if (!lu_gp->lu_gp_valid_id)
+		return 0;
+	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
+}
+
+static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
+	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	unsigned long lu_gp_id;
+	int ret;
+
+	ret = kstrtoul(page, 0, &lu_gp_id);
+	if (ret < 0) {
+		pr_err("kstrtoul() returned %d for"
+			" lu_gp_id\n", ret);
+		return ret;
+	}
+	if (lu_gp_id > 0x0000ffff) {
+		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", lu_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s to ID: %hu\n",
+		config_item_name(&alua_lu_gp_cg->cg_item),
+		lu_gp->lu_gp_id);
+
+	return count;
+}
+
+static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
+{
+	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
+	struct se_device *dev;
+	struct se_hba *hba;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		hba = dev->se_hba;
+
+		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&dev->dev_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			pr_warn("Ran out of lu_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	return len;
+}
+
+CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
+CONFIGFS_ATTR_RO(target_lu_gp_, members);
+
+static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
+	&target_lu_gp_attr_lu_gp_id,
+	&target_lu_gp_attr_members,
+	NULL,
+};
+
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	core_alua_free_lu_gp(lu_gp);
+}
+
+static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+	.release		= target_core_alua_lu_gp_release,
+};
+
+static struct config_item_type target_core_alua_lu_gp_cit = {
+	.ct_item_ops		= &target_core_alua_lu_gp_ops,
+	.ct_attrs		= target_core_alua_lu_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+static struct config_group *target_core_alua_create_lu_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_group *alua_lu_gp_cg = NULL;
+	struct config_item *alua_lu_gp_ci = NULL;
+
+	lu_gp = core_alua_allocate_lu_gp(name, 0);
+	if (IS_ERR(lu_gp))
+		return NULL;
+
+	alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_lu_gp_cg, name,
+			&target_core_alua_lu_gp_cit);
+
+	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s\n",
+		config_item_name(alua_lu_gp_ci));
+
+	return alua_lu_gp_cg;
+}
+
+static void target_core_alua_drop_lu_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s, ID: %hu\n",
+		config_item_name(item), lu_gp->lu_gp_id);
+	/*
+	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+	 * -> target_core_alua_lu_gp_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
+	.make_group		= &target_core_alua_create_lu_gp,
+	.drop_item		= &target_core_alua_drop_lu_gp,
+};
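+
+/*
+ * With the group_ops above, userspace manages Logical Unit groups via
+ * plain directory operations, e.g. (hypothetical group name):
+ *
+ *   # mkdir /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp
+ *   # echo 1 > /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp/lu_gp_id
+ *   # rmdir /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp
+ */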
+
+static struct config_item_type target_core_alua_lu_gps_cit = {
+	.ct_item_ops		= NULL,
+	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
+			tg_pt_gp_group);
+}
+
+static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "%d\n",
+		atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
+}
+
+static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+	unsigned long tmp;
+	int new_state, ret;
+
+	if (!tg_pt_gp->tg_pt_gp_valid_id) {
+		pr_err("Unable to do implicit ALUA on non valid"
+			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+	if (!(dev->dev_flags & DF_CONFIGURED)) {
+		pr_err("Unable to set alua_access_state while device is"
+		       " not configured\n");
+		return -ENODEV;
+	}
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract new ALUA access state from"
+				" %s\n", page);
+		return ret;
+	}
+	new_state = (int)tmp;
+
+	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
+		pr_err("Unable to process implicit configfs ALUA"
+			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
+		return -EINVAL;
+	}
+	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
+	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
+		/* LBA DEPENDENT is only allowed with implicit ALUA */
+		pr_err("Unable to process implicit configfs ALUA transition"
+		       " while explicit ALUA management is enabled\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_do_port_transition(tg_pt_gp, dev,
+					NULL, NULL, new_state, 0);
+	return (!ret) ? count : -EINVAL;
+}
+
+static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
+		char *page)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+	return sprintf(page, "%s\n",
+		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
+}
+
+static ssize_t target_tg_pt_gp_alua_access_status_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+	unsigned long tmp;
+	int new_status, ret;
+
+	if (!tg_pt_gp->tg_pt_gp_valid_id) {
+		pr_err("Unable to do set ALUA access status on non"
+			" valid tg_pt_gp ID: %hu\n",
+			tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract new ALUA access status"
+				" from %s\n", page);
+		return ret;
+	}
+	new_status = (int)tmp;
+
+	if ((new_status != ALUA_STATUS_NONE) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
+		pr_err("Illegal ALUA access status: 0x%02x\n",
+				new_status);
+		return -EINVAL;
+	}
+
+	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
+	return count;
+}
+
+static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
+		char *page)
+{
+	return core_alua_show_access_type(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
+}
+
+#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit)				\
+static ssize_t target_tg_pt_gp_alua_support_##_name##_show(		\
+		struct config_item *item, char *p)			\
+{									\
+	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
+	return sprintf(p, "%d\n",					\
+		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
+}									\
+									\
+static ssize_t target_tg_pt_gp_alua_support_##_name##_store(		\
+		struct config_item *item, const char *p, size_t c)	\
+{									\
+	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
+	unsigned long tmp;						\
+	int ret;							\
+									\
+	if (!t->tg_pt_gp_valid_id) {					\
+		pr_err("Unable to do set ##_name ALUA state on non"	\
+		       " valid tg_pt_gp ID: %hu\n",			\
+		       t->tg_pt_gp_valid_id);				\
+		return -EINVAL;						\
+	}								\
+									\
+	ret = kstrtoul(p, 0, &tmp);					\
+	if (ret < 0) {							\
+		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
+		return -EINVAL;						\
+	}								\
+	if (tmp > 1) {							\
+		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
+		return -EINVAL;						\
+	}								\
+	if (tmp)							\
+		t->tg_pt_gp_alua_supported_states |= _bit;		\
+	else								\
+		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
+									\
+	return c;							\
+}
+
+ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
+ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
+ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
+ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
+ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
+ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
+ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
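+
+/*
+ * Each ALUA_SUPPORTED_STATE_ATTR() invocation above expands to a
+ * _show()/_store() pair, e.g. target_tg_pt_gp_alua_support_standby_show()
+ * reporting and _store() toggling ALUA_S_SUP; the CONFIGFS_ATTR() entries
+ * below expose them as alua_support_standby, etc.
+ */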
+
+static ssize_t target_tg_pt_gp_alua_write_metadata_show(
+		struct config_item *item, char *page)
+{
+	return sprintf(page, "%d\n",
+		to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
+}
+
+static ssize_t target_tg_pt_gp_alua_write_metadata_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_write_metadata\n");
+		return ret;
+	}
+
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for alua_write_metadata:"
+			" %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
+
+	return count;
+}
+
+static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
+		char *page)
+{
+	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
+			count);
+}
+
+static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
+		char *page)
+{
+	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
+			count);
+}
+
+static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
+		struct config_item *item, char *page)
+{
+	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
+			count);
+}
+
+static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
+		char *page)
+{
+	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
+}
+
+static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
+		char *page)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+
+	if (!tg_pt_gp->tg_pt_gp_valid_id)
+		return 0;
+	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
+}
+
+static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	unsigned long tg_pt_gp_id;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tg_pt_gp_id);
+	if (ret < 0) {
+		pr_err("kstrtoul() returned %d for"
+			" tg_pt_gp_id\n", ret);
+		return ret;
+	}
+	if (tg_pt_gp_id > 0x0000ffff) {
+		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", tg_pt_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
+		"core/alua/tg_pt_gps/%s to ID: %hu\n",
+		config_item_name(&alua_tg_pt_gp_cg->cg_item),
+		tg_pt_gp->tg_pt_gp_id);
+
+	return count;
+}
+
+static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
+		char *page)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+	struct se_lun *lun;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+			lun_tg_pt_gp_link) {
+		struct se_portal_group *tpg = lun->lun_tpg;
+
+		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
+			"/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			pr_warn("Ran out of lu_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	return len;
+}
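+
+/*
+ * Reading members for a target port group emits one line per attached
+ * LUN in "$FABRIC/$WWN/tpgt_$TAG/$LUN" form, e.g. (hypothetical iSCSI
+ * endpoint):
+ *
+ *   iscsi/iqn.2003-01.org.linux-iscsi.example/tpgt_1/lun_0
+ */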
+
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
+CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
+CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
+CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
+CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
+CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
+CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
+
+static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
+	&target_tg_pt_gp_attr_alua_access_state,
+	&target_tg_pt_gp_attr_alua_access_status,
+	&target_tg_pt_gp_attr_alua_access_type,
+	&target_tg_pt_gp_attr_alua_support_transitioning,
+	&target_tg_pt_gp_attr_alua_support_offline,
+	&target_tg_pt_gp_attr_alua_support_lba_dependent,
+	&target_tg_pt_gp_attr_alua_support_unavailable,
+	&target_tg_pt_gp_attr_alua_support_standby,
+	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
+	&target_tg_pt_gp_attr_alua_support_active_optimized,
+	&target_tg_pt_gp_attr_alua_write_metadata,
+	&target_tg_pt_gp_attr_nonop_delay_msecs,
+	&target_tg_pt_gp_attr_trans_delay_msecs,
+	&target_tg_pt_gp_attr_implicit_trans_secs,
+	&target_tg_pt_gp_attr_preferred,
+	&target_tg_pt_gp_attr_tg_pt_gp_id,
+	&target_tg_pt_gp_attr_members,
+	NULL,
+};
+
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
+static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+	.release		= target_core_alua_tg_pt_gp_release,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
+	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
+
+static struct config_group *target_core_alua_create_tg_pt_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua *alua = container_of(group, struct t10_alua,
+					alua_tg_pt_gps_group);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct config_group *alua_tg_pt_gp_cg = NULL;
+	struct config_item *alua_tg_pt_gp_ci = NULL;
+
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
+	if (!tg_pt_gp)
+		return NULL;
+
+	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_tg_pt_gp_cg, name,
+			&target_core_alua_tg_pt_gp_cit);
+
+	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s\n",
+		config_item_name(alua_tg_pt_gp_ci));
+
+	return alua_tg_pt_gp_cg;
+}
+
+static void target_core_alua_drop_tg_pt_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
+		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
+	/*
+	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+	 * -> target_core_alua_tg_pt_gp_release().
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
+	.make_group		= &target_core_alua_create_tg_pt_gp,
+	.drop_item		= &target_core_alua_drop_tg_pt_gp,
+};
+
+TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
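+
+/*
+ * TB_CIT_SETUP() generates target_core_setup_dev_alua_tg_pt_gps_cit(),
+ * which fills tb->tb_dev_alua_tg_pt_gps_cit with the item/group ops and
+ * attrs passed here; target_setup_backend_cits() below calls it once per
+ * registered backend.
+ */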
+
+/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_cit */
+
+/*
+ * target_core_alua_cit is a ConfigFS group that lives under
+ * /sys/kernel/config/target/core/alua.  There are default groups
+ * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
+ * target_core_alua_cit in target_core_init_configfs() below.
+ */
+static struct config_item_type target_core_alua_cit = {
+	.ct_item_ops		= NULL,
+	.ct_attrs		= NULL,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_cit */
+
+/* Start functions for struct config_item_type tb_dev_stat_cit */
+
+static struct config_group *target_core_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+}
+
+static struct configfs_group_operations target_core_stat_group_ops = {
+	.make_group		= &target_core_stat_mkdir,
+	.drop_item		= &target_core_stat_rmdir,
+};
+
+TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
+
+/* End functions for struct config_item_type tb_dev_stat_cit */
+
+/* Start functions for struct config_item_type target_core_hba_cit */
+
+static struct config_group *target_core_make_subdev(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct config_item *hba_ci = &group->cg_item;
+	struct se_hba *hba = item_to_hba(hba_ci);
+	struct target_backend *tb = hba->backend;
+	struct se_device *dev;
+	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+	struct config_group *dev_stat_grp = NULL;
+	int errno = -ENOMEM, ret;
+
+	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
+	if (ret)
+		return ERR_PTR(ret);
+
+	dev = target_alloc_device(hba, name);
+	if (!dev)
+		goto out_unlock;
+
+	dev_cg = &dev->dev_group;
+
+	dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
+			GFP_KERNEL);
+	if (!dev_cg->default_groups)
+		goto out_free_device;
+
+	config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit);
+	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
+			&tb->tb_dev_attrib_cit);
+	config_group_init_type_name(&dev->dev_pr_group, "pr",
+			&tb->tb_dev_pr_cit);
+	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
+			&tb->tb_dev_wwn_cit);
+	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
+			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
+	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
+			"statistics", &tb->tb_dev_stat_cit);
+
+	dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
+	dev_cg->default_groups[1] = &dev->dev_pr_group;
+	dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
+	dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
+	dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
+	dev_cg->default_groups[5] = NULL;
+	/*
+	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
+	 */
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
+	if (!tg_pt_gp)
+		goto out_free_dev_cg_default_groups;
+	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
+
+	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
+	tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!tg_pt_gp_cg->default_groups) {
+		pr_err("Unable to allocate tg_pt_gp_cg->"
+				"default_groups\n");
+		goto out_free_tg_pt_gp;
+	}
+
+	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
+			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
+	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
+	tg_pt_gp_cg->default_groups[1] = NULL;
+	/*
+	 * Add core/$HBA/$DEV/statistics/ default groups
+	 */
+	dev_stat_grp = &dev->dev_stat_grps.stat_group;
+	dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
+				GFP_KERNEL);
+	if (!dev_stat_grp->default_groups) {
+		pr_err("Unable to allocate dev_stat_grp->default_groups\n");
+		goto out_free_tg_pt_gp_cg_default_groups;
+	}
+	target_stat_setup_dev_default_groups(dev);
+
+	mutex_unlock(&hba->hba_access_mutex);
+	return dev_cg;
+
+out_free_tg_pt_gp_cg_default_groups:
+	kfree(tg_pt_gp_cg->default_groups);
+out_free_tg_pt_gp:
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+out_free_dev_cg_default_groups:
+	kfree(dev_cg->default_groups);
+out_free_device:
+	target_free_device(dev);
+out_unlock:
+	mutex_unlock(&hba->hba_access_mutex);
+	return ERR_PTR(errno);
+}
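+
+/*
+ * A successful mkdir of core/$HBA/$DEV thus materializes the default
+ * group layout wired up above, e.g. (hypothetical HBA/device names):
+ *
+ *   core/iblock_0/disk1/{attrib,pr,wwn,alua,statistics}
+ *   core/iblock_0/disk1/alua/default_tg_pt_gp
+ */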
+
+static void target_core_drop_subdev(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct config_group *dev_cg = to_config_group(item);
+	struct se_device *dev =
+		container_of(dev_cg, struct se_device, dev_group);
+	struct se_hba *hba;
+	struct config_item *df_item;
+	struct config_group *tg_pt_gp_cg, *dev_stat_grp;
+	int i;
+
+	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
+
+	mutex_lock(&hba->hba_access_mutex);
+
+	dev_stat_grp = &dev->dev_stat_grps.stat_group;
+	for (i = 0; dev_stat_grp->default_groups[i]; i++) {
+		df_item = &dev_stat_grp->default_groups[i]->cg_item;
+		dev_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(dev_stat_grp->default_groups);
+
+	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
+	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
+		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
+		tg_pt_gp_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(tg_pt_gp_cg->default_groups);
+	/*
+	 * core_alua_free_tg_pt_gp() for the ->default_tg_pt_gp is invoked
+	 * directly from target_core_alua_tg_pt_gp_release().
+	 */
+	dev->t10_alua.default_tg_pt_gp = NULL;
+
+	for (i = 0; dev_cg->default_groups[i]; i++) {
+		df_item = &dev_cg->default_groups[i]->cg_item;
+		dev_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	/*
+	 * se_dev is released from target_core_dev_item_ops->release()
+	 */
+	config_item_put(item);
+	mutex_unlock(&hba->hba_access_mutex);
+}
+
+static struct configfs_group_operations target_core_hba_group_ops = {
+	.make_group		= target_core_make_subdev,
+	.drop_item		= target_core_drop_subdev,
+};
+
+static inline struct se_hba *to_hba(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_hba, hba_group);
+}
+
+static ssize_t target_hba_info_show(struct config_item *item, char *page)
+{
+	struct se_hba *hba = to_hba(item);
+
+	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
+			hba->hba_id, hba->backend->ops->name,
+			TARGET_CORE_VERSION);
+}
+
+static ssize_t target_hba_mode_show(struct config_item *item, char *page)
+{
+	struct se_hba *hba = to_hba(item);
+	int hba_mode = 0;
+
+	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
+		hba_mode = 1;
+
+	return sprintf(page, "%d\n", hba_mode);
+}
+
+static ssize_t target_hba_mode_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_hba *hba = to_hba(item);
+	unsigned long mode_flag;
+	int ret;
+
+	if (hba->backend->ops->pmode_enable_hba == NULL)
+		return -EINVAL;
+
+	ret = kstrtoul(page, 0, &mode_flag);
+	if (ret < 0) {
+		pr_err("Unable to extract hba mode flag: %d\n", ret);
+		return ret;
+	}
+
+	if (hba->dev_count) {
+		pr_err("Unable to set hba_mode with active devices\n");
+		return -EINVAL;
+	}
+
+	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
+	if (ret < 0)
+		return -EINVAL;
+	if (ret > 0)
+		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+	else if (ret == 0)
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+
+	return count;
+}
+
+CONFIGFS_ATTR_RO(target_, hba_info);
+CONFIGFS_ATTR(target_, hba_mode);
+
+static void target_core_hba_release(struct config_item *item)
+{
+	struct se_hba *hba = container_of(to_config_group(item),
+				struct se_hba, hba_group);
+	core_delete_hba(hba);
+}
+
+static struct configfs_attribute *target_core_hba_attrs[] = {
+	&target_attr_hba_info,
+	&target_attr_hba_mode,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_hba_item_ops = {
+	.release		= target_core_hba_release,
+};
+
+static struct config_item_type target_core_hba_cit = {
+	.ct_item_ops		= &target_core_hba_item_ops,
+	.ct_group_ops		= &target_core_hba_group_ops,
+	.ct_attrs		= target_core_hba_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *target_core_call_addhbatotarget(
+	struct config_group *group,
+	const char *name)
+{
+	char *se_plugin_str, *str, *str2;
+	struct se_hba *hba;
+	char buf[TARGET_CORE_NAME_MAX_LEN];
+	unsigned long plugin_dep_id = 0;
+	int ret;
+
+	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
+	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
+		pr_err("Passed *name strlen(): %d exceeds"
+			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
+			TARGET_CORE_NAME_MAX_LEN);
+		return ERR_PTR(-ENAMETOOLONG);
+	}
+	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
+
+	str = strstr(buf, "_");
+	if (!str) {
+		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+		return ERR_PTR(-EINVAL);
+	}
+	se_plugin_str = buf;
+	/*
+	 * Special case for subsystem plugins that have "_" in their names.
+	 * Namely rd_direct and rd_mcp..
+	 */
+	str2 = strstr(str+1, "_");
+	if (str2) {
+		*str2 = '\0'; /* Terminate for *se_plugin_str */
+		str2++; /* Skip to start of plugin dependent ID */
+		str = str2;
+	} else {
+		*str = '\0'; /* Terminate for *se_plugin_str */
+		str++; /* Skip to start of plugin dependent ID */
+	}
+
+	ret = kstrtoul(str, 0, &plugin_dep_id);
+	if (ret < 0) {
+		pr_err("kstrtoul() returned %d for"
+				" plugin_dep_id\n", ret);
+		return ERR_PTR(ret);
+	}
+	/*
+	 * Load up TCM subsystem plugins if they have not already been loaded.
+	 */
+	transport_subsystem_check_init();
+
+	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
+	if (IS_ERR(hba))
+		return ERR_CAST(hba);
+
+	config_group_init_type_name(&hba->hba_group, name,
+			&target_core_hba_cit);
+
+	return &hba->hba_group;
+}
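+
+/*
+ * The $SUBSYSTEM_PLUGIN_$HOST_ID parsing above splits at the second '_'
+ * when one exists, otherwise at the first, e.g. (hypothetical names)
+ * "iblock_0" -> ("iblock", 0) and "rd_mcp_1" -> ("rd_mcp", 1).
+ */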
+
+static void target_core_call_delhbafromtarget(
+	struct config_group *group,
+	struct config_item *item)
+{
+	/*
+	 * core_delete_hba() is called from target_core_hba_item_ops->release()
+	 * -> target_core_hba_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_group_ops = {
+	.make_group	= target_core_call_addhbatotarget,
+	.drop_item	= target_core_call_delhbafromtarget,
+};
+
+static struct config_item_type target_core_cit = {
+	.ct_item_ops	= NULL,
+	.ct_group_ops	= &target_core_group_ops,
+	.ct_attrs	= NULL,
+	.ct_owner	= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_hba_cit */
+
+void target_setup_backend_cits(struct target_backend *tb)
+{
+	target_core_setup_dev_cit(tb);
+	target_core_setup_dev_attrib_cit(tb);
+	target_core_setup_dev_pr_cit(tb);
+	target_core_setup_dev_wwn_cit(tb);
+	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
+	target_core_setup_dev_stat_cit(tb);
+}
+
+static int __init target_core_init_configfs(void)
+{
+	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
+	struct config_group *lu_gp_cg = NULL;
+	struct configfs_subsystem *subsys = &target_core_fabrics;
+	struct t10_alua_lu_gp *lu_gp;
+	int ret;
+
+	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
+		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
+		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
+
+	config_group_init(&subsys->su_group);
+	mutex_init(&subsys->su_mutex);
+
+	ret = init_se_kmem_caches();
+	if (ret < 0)
+		return ret;
+	/*
+	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
+	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
+	 */
+	target_cg = &subsys->su_group;
+	target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!target_cg->default_groups) {
+		pr_err("Unable to allocate target_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out_global;
+	}
+
+	config_group_init_type_name(&target_core_hbagroup,
+			"core", &target_core_cit);
+	target_cg->default_groups[0] = &target_core_hbagroup;
+	target_cg->default_groups[1] = NULL;
+	/*
+	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
+	 */
+	hba_cg = &target_core_hbagroup;
+	hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!hba_cg->default_groups) {
+		pr_err("Unable to allocate hba_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out_global;
+	}
+	config_group_init_type_name(&alua_group,
+			"alua", &target_core_alua_cit);
+	hba_cg->default_groups[0] = &alua_group;
+	hba_cg->default_groups[1] = NULL;
+	/*
+	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
+	 * groups under /sys/kernel/config/target/core/alua/
+	 */
+	alua_cg = &alua_group;
+	alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
+			GFP_KERNEL);
+	if (!alua_cg->default_groups) {
+		pr_err("Unable to allocate alua_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out_global;
+	}
+
+	config_group_init_type_name(&alua_lu_gps_group,
+			"lu_gps", &target_core_alua_lu_gps_cit);
+	alua_cg->default_groups[0] = &alua_lu_gps_group;
+	alua_cg->default_groups[1] = NULL;
+	/*
+	 * Add core/alua/lu_gps/default_lu_gp
+	 */
+	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+	if (IS_ERR(lu_gp)) {
+		ret = -ENOMEM;
+		goto out_global;
+	}
+
+	lu_gp_cg = &alua_lu_gps_group;
+	lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
+			GFP_KERNEL);
+	if (!lu_gp_cg->default_groups) {
+		pr_err("Unable to allocate lu_gp_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out_global;
+	}
+
+	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
+				&target_core_alua_lu_gp_cit);
+	lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
+	lu_gp_cg->default_groups[1] = NULL;
+	default_lu_gp = lu_gp;
+	/*
+	 * Register the target_core_mod subsystem with configfs.
+	 */
+	ret = configfs_register_subsystem(subsys);
+	if (ret < 0) {
+		pr_err("Error %d while registering subsystem %s\n",
+			ret, subsys->su_group.cg_item.ci_namebuf);
+		goto out_global;
+	}
+	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
+		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
+		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
+	/*
+	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
+	 */
+	ret = rd_module_init();
+	if (ret < 0)
+		goto out;
+
+	ret = core_dev_setup_virtual_lun0();
+	if (ret < 0)
+		goto out;
+
+	ret = target_xcopy_setup_pt();
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	configfs_unregister_subsystem(subsys);
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+out_global:
+	if (default_lu_gp) {
+		core_alua_free_lu_gp(default_lu_gp);
+		default_lu_gp = NULL;
+	}
+	if (lu_gp_cg)
+		kfree(lu_gp_cg->default_groups);
+	if (alua_cg)
+		kfree(alua_cg->default_groups);
+	if (hba_cg)
+		kfree(hba_cg->default_groups);
+	kfree(target_cg->default_groups);
+	release_se_kmem_caches();
+	return ret;
+}
+
+static void __exit target_core_exit_configfs(void)
+{
+	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
+	struct config_item *item;
+	int i;
+
+	lu_gp_cg = &alua_lu_gps_group;
+	for (i = 0; lu_gp_cg->default_groups[i]; i++) {
+		item = &lu_gp_cg->default_groups[i]->cg_item;
+		lu_gp_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(lu_gp_cg->default_groups);
+	lu_gp_cg->default_groups = NULL;
+
+	alua_cg = &alua_group;
+	for (i = 0; alua_cg->default_groups[i]; i++) {
+		item = &alua_cg->default_groups[i]->cg_item;
+		alua_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(alua_cg->default_groups);
+	alua_cg->default_groups = NULL;
+
+	hba_cg = &target_core_hbagroup;
+	for (i = 0; hba_cg->default_groups[i]; i++) {
+		item = &hba_cg->default_groups[i]->cg_item;
+		hba_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(hba_cg->default_groups);
+	hba_cg->default_groups = NULL;
+	/*
+	 * We expect subsys->su_group.default_groups to be released
+	 * by configfs subsystem provider logic..
+	 */
+	configfs_unregister_subsystem(&target_core_fabrics);
+	kfree(target_core_fabrics.su_group.default_groups);
+
+	core_alua_free_lu_gp(default_lu_gp);
+	default_lu_gp = NULL;
+
+	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
+			" Infrastructure\n");
+
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+	target_xcopy_release_pt();
+	release_se_kmem_caches();
+}
+
+MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(target_core_init_configfs);
+module_exit(target_core_exit_configfs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
new file mode 100644
index 0000000..bb6a6c3
--- /dev/null
+++ b/drivers/target/target_core_device.c
@@ -0,0 +1,1121 @@
+/*******************************************************************************
+ * Filename:  target_core_device.c (based on iscsi_target_device.c)
+ *
+ * This file contains the TCM Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * (c) Copyright 2003-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/export.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+DEFINE_MUTEX(g_device_mutex);
+LIST_HEAD(g_device_list);
+
+static struct se_hba *lun0_hba;
+/* not static, needed by tpg.c */
+struct se_device *g_lun0_dev;
+
+sense_reason_t
+transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
+{
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = se_cmd->se_sess;
+	struct se_node_acl *nacl = se_sess->se_node_acl;
+	struct se_dev_entry *deve;
+	sense_reason_t ret = TCM_NO_SENSE;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, unpacked_lun);
+	if (deve) {
+		atomic_long_inc(&deve->total_cmds);
+
+		if (se_cmd->data_direction == DMA_TO_DEVICE)
+			atomic_long_add(se_cmd->data_length,
+					&deve->write_bytes);
+		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+			atomic_long_add(se_cmd->data_length,
+					&deve->read_bytes);
+
+		se_lun = rcu_dereference(deve->se_lun);
+
+		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
+			se_lun = NULL;
+			goto out_unlock;
+		}
+
+		se_cmd->se_lun = se_lun;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+		se_cmd->lun_ref_active = true;
+
+		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08llx\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			rcu_read_unlock();
+			ret = TCM_WRITE_PROTECTED;
+			goto ref_dev;
+		}
+	}
+out_unlock:
+	rcu_read_unlock();
+
+	if (!se_lun) {
+		/*
+		 * Use the se_portal_group->tpg_virt_lun0 to allow for
+		 * REPORT_LUNS, et al to be returned when no active
+		 * MappedLUN=0 exists for this Initiator Port.
+		 */
+		if (unpacked_lun != 0) {
+			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+				" Access for 0x%08llx\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			return TCM_NON_EXISTENT_LUN;
+		}
+
+		se_lun = se_sess->se_tpg->tpg_virt_lun0;
+		se_cmd->se_lun = se_lun;
+		se_cmd->orig_fe_lun = 0;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+
+		percpu_ref_get(&se_lun->lun_ref);
+		se_cmd->lun_ref_active = true;
+
+		/*
+		 * Force WRITE PROTECT for virtual LUN 0
+		 */
+		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+		    (se_cmd->data_direction != DMA_NONE)) {
+			ret = TCM_WRITE_PROTECTED;
+			goto ref_dev;
+		}
+	}
+	/*
+	 * RCU reference protected by percpu se_lun->lun_ref taken above that
+	 * must drop to zero (including initial reference) before this se_lun
+	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
+	 * target_core_fabric_configfs.c:target_fabric_port_release
+	 */
+ref_dev:
+	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+	atomic_long_inc(&se_cmd->se_dev->num_cmds);
+
+	if (se_cmd->data_direction == DMA_TO_DEVICE)
+		atomic_long_add(se_cmd->data_length,
+				&se_cmd->se_dev->write_bytes);
+	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+		atomic_long_add(se_cmd->data_length,
+				&se_cmd->se_dev->read_bytes);
+
+	return ret;
+}
+EXPORT_SYMBOL(transport_lookup_cmd_lun);
+
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = se_cmd->se_sess;
+	struct se_node_acl *nacl = se_sess->se_node_acl;
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	unsigned long flags;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, unpacked_lun);
+	if (deve) {
+		se_lun = rcu_dereference(deve->se_lun);
+		se_tmr->tmr_lun = se_lun;
+		se_cmd->se_lun = se_lun;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+	}
+	rcu_read_unlock();
+
+	if (!se_lun) {
+		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+			" Access for 0x%08llx\n",
+			se_cmd->se_tfo->get_fabric_name(),
+			unpacked_lun);
+		return -ENODEV;
+	}
+	/*
+	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
+	 */
+	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+
+	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
+	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_lookup_tmr_lun);
+
+bool target_lun_is_rdonly(struct se_cmd *cmd)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_dev_entry *deve;
+	bool ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
+	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(target_lun_is_rdonly);
+
+/*
+ * This function is called from core_scsi3_emulate_pro_register_and_move()
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
+ * when a matching rtpi is found.
+ */
+struct se_dev_entry *core_get_se_deve_from_rtpi(
+	struct se_node_acl *nacl,
+	u16 rtpi)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+		lun = rcu_dereference(deve->se_lun);
+		if (!lun) {
+			pr_err("%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				tpg->se_tpg_tfo->get_fabric_name());
+			continue;
+		}
+		if (lun->lun_rtpi != rtpi)
+			continue;
+
+		kref_get(&deve->pr_kref);
+		rcu_read_unlock();
+
+		return deve;
+	}
+	rcu_read_unlock();
+
+	return NULL;
+}
+
+void core_free_device_list_for_node(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	struct se_dev_entry *deve;
+
+	mutex_lock(&nacl->lun_entry_mutex);
+	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
+					lockdep_is_held(&nacl->lun_entry_mutex));
+		core_disable_device_list_for_node(lun, deve, nacl, tpg);
+	}
+	mutex_unlock(&nacl->lun_entry_mutex);
+}
+
+void core_update_device_list_access(
+	u64 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+
+	mutex_lock(&nacl->lun_entry_mutex);
+	deve = target_nacl_find_deve(nacl, mapped_lun);
+	if (deve) {
+		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+		}
+	}
+	mutex_unlock(&nacl->lun_entry_mutex);
+}
+
+/*
+ * Called with rcu_read_lock or nacl->device_list_lock held.
+ */
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
+{
+	struct se_dev_entry *deve;
+
+	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+		if (deve->mapped_lun == mapped_lun)
+			return deve;
+
+	return NULL;
+}
+EXPORT_SYMBOL(target_nacl_find_deve);
+
+void target_pr_kref_release(struct kref *kref)
+{
+	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
+						 pr_kref);
+	complete(&deve->pr_comp);
+}
+
+static void
+target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
+			     bool skip_new)
+{
+	struct se_dev_entry *tmp;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
+		if (skip_new && tmp == new)
+			continue;
+		core_scsi3_ua_allocate(tmp, 0x3F,
+				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
+	}
+	rcu_read_unlock();
+}
+
+int core_enable_device_list_for_node(
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl,
+	u64 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	struct se_dev_entry *orig, *new;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new) {
+		pr_err("Unable to allocate se_dev_entry memory\n");
+		return -ENOMEM;
+	}
+
+	atomic_set(&new->ua_count, 0);
+	spin_lock_init(&new->ua_lock);
+	INIT_LIST_HEAD(&new->ua_list);
+	INIT_LIST_HEAD(&new->lun_link);
+
+	new->mapped_lun = mapped_lun;
+	kref_init(&new->pr_kref);
+	init_completion(&new->pr_comp);
+
+	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
+		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+	else
+		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+
+	new->creation_time = get_jiffies_64();
+	new->attach_count++;
+
+	mutex_lock(&nacl->lun_entry_mutex);
+	orig = target_nacl_find_deve(nacl, mapped_lun);
+	if (orig && orig->se_lun) {
+		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
+					lockdep_is_held(&nacl->lun_entry_mutex));
+
+		if (orig_lun != lun) {
+			pr_err("Existing orig->se_lun doesn't match new lun"
+			       " for dynamic -> explicit NodeACL conversion:"
+				" %s\n", nacl->initiatorname);
+			mutex_unlock(&nacl->lun_entry_mutex);
+			kfree(new);
+			return -EINVAL;
+		}
+		if (orig->se_lun_acl != NULL) {
+			pr_warn_ratelimited("Detected existing explicit"
+				" se_lun_acl->se_lun_group reference for %s"
+				" mapped_lun: %llu, failing\n",
+				 nacl->initiatorname, mapped_lun);
+			mutex_unlock(&nacl->lun_entry_mutex);
+			kfree(new);
+			return -EINVAL;
+		}
+
+		rcu_assign_pointer(new->se_lun, lun);
+		rcu_assign_pointer(new->se_lun_acl, lun_acl);
+		hlist_del_rcu(&orig->link);
+		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
+		mutex_unlock(&nacl->lun_entry_mutex);
+
+		spin_lock(&lun->lun_deve_lock);
+		list_del(&orig->lun_link);
+		list_add_tail(&new->lun_link, &lun->lun_deve_list);
+		spin_unlock(&lun->lun_deve_lock);
+
+		kref_put(&orig->pr_kref, target_pr_kref_release);
+		wait_for_completion(&orig->pr_comp);
+
+		target_luns_data_has_changed(nacl, new, true);
+		kfree_rcu(orig, rcu_head);
+		return 0;
+	}
+
+	rcu_assign_pointer(new->se_lun, lun);
+	rcu_assign_pointer(new->se_lun_acl, lun_acl);
+	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
+	mutex_unlock(&nacl->lun_entry_mutex);
+
+	spin_lock(&lun->lun_deve_lock);
+	list_add_tail(&new->lun_link, &lun->lun_deve_list);
+	spin_unlock(&lun->lun_deve_lock);
+
+	target_luns_data_has_changed(nacl, new, true);
+	return 0;
+}
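+
+/*
+ * The dynamic -> explicit conversion above follows the usual RCU publish
+ * pattern: the replacement se_dev_entry is fully initialized first,
+ * rcu_assign_pointer() publishes its pointers, hlist_del_rcu() +
+ * hlist_add_head_rcu() swap the entries, and the old entry is only
+ * kfree_rcu()'d once outstanding pr_kref holders have completed.
+ */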
+
+/*
+ *	Called with se_node_acl->lun_entry_mutex held.
+ */
+void core_disable_device_list_for_node(
+	struct se_lun *lun,
+	struct se_dev_entry *orig,
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	/*
+	 * rcu_dereference_raw protected by se_lun->lun_group symlink
+	 * reference to se_device->dev_group.
+	 */
+	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+	/*
+	 * If the MappedLUN entry is being disabled, the entry in
+	 * lun->lun_deve_list must be removed now before clearing the
+	 * struct se_dev_entry pointers below as logic in
+	 * core_alua_do_transition_tg_pt() depends on these being present.
+	 *
+	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
+	 * that have not been explicitly converted to MappedLUNs ->
+	 * struct se_lun_acl, but we remove deve->lun_link from
+	 * lun->lun_deve_list. This also means that active UAs and
+	 * NodeACL context specific PR metadata for demo-mode
+	 * MappedLUN *deve will be released below..
+	 */
+	spin_lock(&lun->lun_deve_lock);
+	list_del(&orig->lun_link);
+	spin_unlock(&lun->lun_deve_lock);
+	/*
+	 * Disable struct se_dev_entry LUN ACL mapping
+	 */
+	core_scsi3_ua_release_all(orig);
+
+	hlist_del_rcu(&orig->link);
+	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
+	orig->lun_flags = 0;
+	orig->creation_time = 0;
+	orig->attach_count--;
+	/*
+	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
+	 * or REGISTER_AND_MOVE PR operation to complete.
+	 */
+	kref_put(&orig->pr_kref, target_pr_kref_release);
+	wait_for_completion(&orig->pr_comp);
+
+	rcu_assign_pointer(orig->se_lun, NULL);
+	rcu_assign_pointer(orig->se_lun_acl, NULL);
+
+	kfree_rcu(orig, rcu_head);
+
+	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
+	target_luns_data_has_changed(nacl, NULL, false);
+}
+
+/*      core_clear_lun_from_tpg():
+ *
+ *
+ */
+void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
+{
+	struct se_node_acl *nacl;
+	struct se_dev_entry *deve;
+
+	mutex_lock(&tpg->acl_node_mutex);
+	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
+
+		mutex_lock(&nacl->lun_entry_mutex);
+		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
+					lockdep_is_held(&nacl->lun_entry_mutex));
+
+			if (lun != tmp_lun)
+				continue;
+
+			core_disable_device_list_for_node(lun, deve, nacl, tpg);
+		}
+		mutex_unlock(&nacl->lun_entry_mutex);
+	}
+	mutex_unlock(&tpg->acl_node_mutex);
+}
+
+int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
+{
+	struct se_lun *tmp;
+
+	spin_lock(&dev->se_port_lock);
+	if (dev->export_count == 0x0000ffff) {
+		pr_warn("Reached dev->dev_port_count =="
+				" 0x0000ffff\n");
+		spin_unlock(&dev->se_port_lock);
+		return -ENOSPC;
+	}
+again:
+	/*
+	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
+	 * Here is the table from spc4r17 section 7.7.3.8.
+	 *
+	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
+	 *
+	 * Code      Description
+	 * 0h        Reserved
+	 * 1h        Relative port 1, historically known as port A
+	 * 2h        Relative port 2, historically known as port B
+	 * 3h to FFFFh    Relative port 3 through 65 535
+	 */
+	lun->lun_rtpi = dev->dev_rpti_counter++;
+	if (!lun->lun_rtpi)
+		goto again;
+
+	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
+		/*
+		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
+		 * for 16-bit wrap..
+		 */
+		if (lun->lun_rtpi == tmp->lun_rtpi)
+			goto again;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return 0;
+}
+
+static void se_release_vpd_for_dev(struct se_device *dev)
+{
+	struct t10_vpd *vpd, *vpd_tmp;
+
+	spin_lock(&dev->t10_wwn.t10_vpd_lock);
+	list_for_each_entry_safe(vpd, vpd_tmp,
+			&dev->t10_wwn.t10_vpd_list, vpd_list) {
+		list_del(&vpd->vpd_list);
+		kfree(vpd);
+	}
+	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
+}
+
+static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+	u32 aligned_max_sectors;
+	u32 alignment;
+	/*
+	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
+	 * transport_allocate_data_tasks() operation.
+	 */
+	alignment = max(1ul, PAGE_SIZE / block_size);
+	aligned_max_sectors = rounddown(max_sectors, alignment);
+
+	if (max_sectors != aligned_max_sectors)
+		pr_info("Rounding down aligned max_sectors from %u to %u\n",
+			max_sectors, aligned_max_sectors);
+
+	return aligned_max_sectors;
+}
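+
+/*
+ * Example: with PAGE_SIZE 4096 and block_size 512, alignment is 8 blocks,
+ * so a hw max_sectors of 1027 rounds down to 1024 (512 KiB of
+ * PAGE_SIZE-aligned I/O per request).
+ */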
+
+int core_dev_add_lun(
+	struct se_portal_group *tpg,
+	struct se_device *dev,
+	struct se_lun *lun)
+{
+	int rc;
+
+	rc = core_tpg_add_lun(tpg, lun,
+				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
+		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
+	/*
+	 * Update LUN maps for dynamically added initiators when
+	 * generate_node_acl is enabled.
+	 */
+	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
+		struct se_node_acl *acl;
+
+		mutex_lock(&tpg->acl_node_mutex);
+		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+			if (acl->dynamic_node_acl &&
+			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
+			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
+				core_tpg_add_node_to_devs(acl, tpg, lun);
+			}
+		}
+		mutex_unlock(&tpg->acl_node_mutex);
+	}
+
+	return 0;
+}
+
+/*      core_dev_del_lun():
+ *
+ *
+ */
+void core_dev_del_lun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
+		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+		tpg->se_tpg_tfo->get_fabric_name());
+
+	core_tpg_remove_lun(tpg, lun);
+}
+
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *nacl,
+	u64 mapped_lun,
+	int *ret)
+{
+	struct se_lun_acl *lacl;
+
+	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
+		pr_err("%s InitiatorName exceeds maximum size.\n",
+			tpg->se_tpg_tfo->get_fabric_name());
+		*ret = -EOVERFLOW;
+		return NULL;
+	}
+	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+	if (!lacl) {
+		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
+		*ret = -ENOMEM;
+		return NULL;
+	}
+
+	lacl->mapped_lun = mapped_lun;
+	lacl->se_lun_nacl = nacl;
+
+	return lacl;
+}
+
+int core_dev_add_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl,
+	struct se_lun *lun,
+	u32 lun_access)
+{
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	/*
+	 * rcu_dereference_raw protected by se_lun->lun_group symlink
+	 * reference to se_device->dev_group.
+	 */
+	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
+	if (!nacl)
+		return -EINVAL;
+
+	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
+	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+
+	lacl->se_lun = lun;
+
+	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
+			lun_access, nacl, tpg) < 0)
+		return -EINVAL;
+
+	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
+		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
+		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+		nacl->initiatorname);
+	/*
+	 * Check to see if there are any existing persistent reservation APTPL
+	 * pre-registrations that need to be enabled for this LUN ACL..
+	 */
+	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
+					    lacl->mapped_lun);
+	return 0;
+}
+
+int core_dev_del_initiator_node_lun_acl(
+	struct se_lun *lun,
+	struct se_lun_acl *lacl)
+{
+	struct se_portal_group *tpg = lun->lun_tpg;
+	struct se_node_acl *nacl;
+	struct se_dev_entry *deve;
+
+	nacl = lacl->se_lun_nacl;
+	if (!nacl)
+		return -EINVAL;
+
+	mutex_lock(&nacl->lun_entry_mutex);
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (deve)
+		core_disable_device_list_for_node(lun, deve, nacl, tpg);
+	mutex_unlock(&nacl->lun_entry_mutex);
+
+	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
+		" InitiatorNode: %s Mapped LUN: %llu\n",
+		tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+		nacl->initiatorname, lacl->mapped_lun);
+
+	return 0;
+}
+
+void core_dev_free_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl)
+{
+	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg),
+		tpg->se_tpg_tfo->get_fabric_name(),
+		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
+
+	kfree(lacl);
+}
+
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+	struct t10_wwn *wwn = &dev->t10_wwn;
+	char buf[17];
+	int i, device_type;
+	/*
+	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+	 */
+	for (i = 0; i < 8; i++)
+		if (wwn->vendor[i] >= 0x20)
+			buf[i] = wwn->vendor[i];
+		else
+			buf[i] = ' ';
+	buf[i] = '\0';
+	pr_debug("  Vendor: %s\n", buf);
+
+	for (i = 0; i < 16; i++)
+		if (wwn->model[i] >= 0x20)
+			buf[i] = wwn->model[i];
+		else
+			buf[i] = ' ';
+	buf[i] = '\0';
+	pr_debug("  Model: %s\n", buf);
+
+	for (i = 0; i < 4; i++)
+		if (wwn->revision[i] >= 0x20)
+			buf[i] = wwn->revision[i];
+		else
+			buf[i] = ' ';
+	buf[i] = '\0';
+	pr_debug("  Revision: %s\n", buf);
+
+	device_type = dev->transport->get_device_type(dev);
+	pr_debug("  Type:   %s ", scsi_device_type(device_type));
+}
+
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+{
+	struct se_device *dev;
+	struct se_lun *xcopy_lun;
+
+	dev = hba->backend->ops->alloc_device(hba, name);
+	if (!dev)
+		return NULL;
+
+	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+	dev->se_hba = hba;
+	dev->transport = hba->backend->ops;
+	dev->prot_length = sizeof(struct t10_pi_tuple);
+	dev->hba_index = hba->hba_index;
+
+	INIT_LIST_HEAD(&dev->dev_list);
+	INIT_LIST_HEAD(&dev->dev_sep_list);
+	INIT_LIST_HEAD(&dev->dev_tmr_list);
+	INIT_LIST_HEAD(&dev->delayed_cmd_list);
+	INIT_LIST_HEAD(&dev->state_list);
+	INIT_LIST_HEAD(&dev->qf_cmd_list);
+	INIT_LIST_HEAD(&dev->g_dev_node);
+	spin_lock_init(&dev->execute_task_lock);
+	spin_lock_init(&dev->delayed_cmd_lock);
+	spin_lock_init(&dev->dev_reservation_lock);
+	spin_lock_init(&dev->se_port_lock);
+	spin_lock_init(&dev->se_tmr_lock);
+	spin_lock_init(&dev->qf_cmd_lock);
+	sema_init(&dev->caw_sem, 1);
+	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
+	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
+	spin_lock_init(&dev->t10_pr.registration_lock);
+	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
+	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
+	spin_lock_init(&dev->t10_alua.lba_map_lock);
+
+	dev->t10_wwn.t10_dev = dev;
+	dev->t10_alua.t10_dev = dev;
+
+	dev->dev_attrib.da_dev = dev;
+	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
+	dev->dev_attrib.emulate_dpo = 1;
+	dev->dev_attrib.emulate_fua_write = 1;
+	dev->dev_attrib.emulate_fua_read = 1;
+	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
+	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
+	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
+	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
+	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
+	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
+	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+	dev->dev_attrib.max_unmap_block_desc_count =
+		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+	dev->dev_attrib.unmap_granularity_alignment =
+				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+
+	xcopy_lun = &dev->xcopy_lun;
+	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
+	init_completion(&xcopy_lun->lun_ref_comp);
+	init_completion(&xcopy_lun->lun_shutdown_comp);
+	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
+	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
+	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
+	xcopy_lun->lun_tpg = &xcopy_pt_tpg;
+
+	return dev;
+}
+
+/*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI (TRIM in
+ * ATA), in which case TPE=1 needs to be reported to the initiator.
+ */
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+				       struct request_queue *q)
+{
+	int block_size = queue_logical_block_size(q);
+
+	if (!blk_queue_discard(q))
+		return false;
+
+	attrib->max_unmap_lba_count =
+		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
+	/*
+	 * Currently hardcoded to 1 in Linux/SCSI code..
+	 */
+	attrib->max_unmap_block_desc_count = 1;
+	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
+								block_size;
+	return true;
+}
+EXPORT_SYMBOL(target_configure_unmap_from_queue);
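+
+/*
+ * Example: for a 4096-byte logical block size, ilog2(4096) - 9 == 3, so
+ * max_discard_sectors (given by the block layer in 512-byte units) is
+ * shifted right by 3 above to express max_unmap_lba_count in logical
+ * blocks.
+ */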
+
+/*
+ * Convert from blocksize advertised to the initiator to the 512 byte
+ * units unconditionally used by the Linux block layer.
+ */
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+{
+	switch (dev->dev_attrib.block_size) {
+	case 4096:
+		return lb << 3;
+	case 2048:
+		return lb << 2;
+	case 1024:
+		return lb << 1;
+	default:
+		return lb;
+	}
+}
+EXPORT_SYMBOL(target_to_linux_sector);
+
+int target_configure_device(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+	int ret;
+
+	if (dev->dev_flags & DF_CONFIGURED) {
+		pr_err("se_dev->se_dev_ptr already set for storage"
+				" object\n");
+		return -EEXIST;
+	}
+
+	ret = dev->transport->configure_device(dev);
+	if (ret)
+		goto out;
+	/*
+	 * XXX: there is not much point in having two different values here.
+	 */
+	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
+	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
+
+	/*
+	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
+	 */
+	dev->dev_attrib.hw_max_sectors =
+		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+					 dev->dev_attrib.hw_block_size);
+	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
+
+	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+	dev->creation_time = get_jiffies_64();
+
+	ret = core_setup_alua(dev);
+	if (ret)
+		goto out;
+
+	/*
+	 * Startup the struct se_device processing thread
+	 */
+	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+				      dev->transport->name);
+	if (!dev->tmr_wq) {
+		pr_err("Unable to create tmr workqueue for %s\n",
+			dev->transport->name);
+		ret = -ENOMEM;
+		goto out_free_alua;
+	}
+
+	/*
+	 * Setup work_queue for QUEUE_FULL
+	 */
+	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
+
+	/*
+	 * Preload the initial INQUIRY const values if we are doing
+	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+	 * passthrough because this is being provided by the backend LLD.
+	 */
+	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
+		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+		strncpy(&dev->t10_wwn.model[0],
+			dev->transport->inquiry_prod, 16);
+		strncpy(&dev->t10_wwn.revision[0],
+			dev->transport->inquiry_rev, 4);
+	}
+
+	scsi_dump_inquiry(dev);
+
+	spin_lock(&hba->device_lock);
+	hba->dev_count++;
+	spin_unlock(&hba->device_lock);
+
+	mutex_lock(&g_device_mutex);
+	list_add_tail(&dev->g_dev_node, &g_device_list);
+	mutex_unlock(&g_device_mutex);
+
+	dev->dev_flags |= DF_CONFIGURED;
+
+	return 0;
+
+out_free_alua:
+	core_alua_free_lu_gp_mem(dev);
+out:
+	se_release_vpd_for_dev(dev);
+	return ret;
+}
+
+void target_free_device(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	WARN_ON(!list_empty(&dev->dev_sep_list));
+
+	if (dev->dev_flags & DF_CONFIGURED) {
+		destroy_workqueue(dev->tmr_wq);
+
+		mutex_lock(&g_device_mutex);
+		list_del(&dev->g_dev_node);
+		mutex_unlock(&g_device_mutex);
+
+		spin_lock(&hba->device_lock);
+		hba->dev_count--;
+		spin_unlock(&hba->device_lock);
+	}
+
+	core_alua_free_lu_gp_mem(dev);
+	core_alua_set_lba_map(dev, NULL, 0, 0);
+	core_scsi3_free_all_registrations(dev);
+	se_release_vpd_for_dev(dev);
+
+	if (dev->transport->free_prot)
+		dev->transport->free_prot(dev);
+
+	dev->transport->free_device(dev);
+}
+
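+/*
+ * Create the internal RAMDISK-backed device used for virtual LUN 0.
+ * The "rd_pages=8,rd_nullio=1" parameters request a tiny rd_mcp
+ * backstore whose I/O is a no-op, which is all LUN 0 needs to answer
+ * INQUIRY and REPORT LUNS.
+ */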
+int core_dev_setup_virtual_lun0(void)
+{
+	struct se_hba *hba;
+	struct se_device *dev;
+	char buf[] = "rd_pages=8,rd_nullio=1";
+	int ret;
+
+	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
+	if (IS_ERR(hba))
+		return PTR_ERR(hba);
+
+	dev = target_alloc_device(hba, "virt_lun0");
+	if (!dev) {
+		ret = -ENOMEM;
+		goto out_free_hba;
+	}
+
+	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
+
+	ret = target_configure_device(dev);
+	if (ret)
+		goto out_free_se_dev;
+
+	lun0_hba = hba;
+	g_lun0_dev = dev;
+	return 0;
+
+out_free_se_dev:
+	target_free_device(dev);
+out_free_hba:
+	core_delete_hba(hba);
+	return ret;
+}
+
+
+void core_dev_release_virtual_lun0(void)
+{
+	struct se_hba *hba = lun0_hba;
+
+	if (!hba)
+		return;
+
+	if (g_lun0_dev)
+		target_free_device(g_lun0_dev);
+	core_delete_hba(hba);
+}
+
+/*
+ * Common CDB parsing for kernel and user passthrough.
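+ *
+ * Both the pSCSI and TCM-USER backends call this, since they forward
+ * most CDBs straight to the underlying device or userspace instead of
+ * emulating them in the core.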
+ */
+sense_reason_t
+passthrough_parse_cdb(struct se_cmd *cmd,
+	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
+{
+	unsigned char *cdb = cmd->t_task_cdb;
+
+	/*
+	 * Clear a LUN set in the CDB if the initiator talking to us spoke
+	 * an old standards version, as we can't assume the underlying
+	 * device won't choke on it.
+	 */
+	switch (cdb[0]) {
+	case READ_10: /* SBC - RDProtect */
+	case READ_12: /* SBC - RDProtect */
+	case READ_16: /* SBC - RDProtect */
+	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+	case VERIFY: /* SBC - VRProtect */
+	case VERIFY_16: /* SBC - VRProtect */
+	case WRITE_VERIFY: /* SBC - VRProtect */
+	case WRITE_VERIFY_12: /* SBC - VRProtect */
+	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+		break;
+	default:
+		cdb[1] &= 0x1f; /* clear logical unit number */
+		break;
+	}
+
+	/*
+	 * For REPORT LUNS we always need to emulate the response, for everything
+	 * else, pass it up.
+	 */
+	if (cdb[0] == REPORT_LUNS) {
+		cmd->execute_cmd = spc_emulate_report_luns;
+		return TCM_NO_SENSE;
+	}
+
+	/* Set DATA_CDB flag for ops that should have it */
+	switch (cdb[0]) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+	case WRITE_VERIFY:
+	case WRITE_VERIFY_12:
+	case 0x8e: /* WRITE_VERIFY_16 */
+	case COMPARE_AND_WRITE:
+	case XDWRITEREAD_10:
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		break;
+	case VARIABLE_LENGTH_CMD:
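+		/*
+		 * For variable-length CDBs the service action lives in
+		 * bytes 8-9 rather than in the opcode byte.
+		 */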
+		switch (get_unaligned_be16(&cdb[8])) {
+		case READ_32:
+		case WRITE_32:
+		case 0x0c: /* WRITE_VERIFY_32 */
+		case XDWRITEREAD_32:
+			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+			break;
+		}
+	}
+
+	cmd->execute_cmd = exec_cmd;
+
+	return TCM_NO_SENSE;
+}
+EXPORT_SYMBOL(passthrough_parse_cdb);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 0000000..b070ddf
--- /dev/null
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,1100 @@
+/*******************************************************************************
+ * Filename: target_core_fabric_configfs.c
+ *
+ * This file contains generic fabric module configfs infrastructure for
+ * TCM v4.x code
+ *
+ * (c) Copyright 2010-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
+#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{									\
+	struct config_item_type *cit = &tf->tf_##_name##_cit;		\
+									\
+	cit->ct_item_ops = _item_ops;					\
+	cit->ct_group_ops = _group_ops;					\
+	cit->ct_attrs = _attrs;						\
+	cit->ct_owner = tf->tf_ops->module;				\
+	pr_debug("Setup generic %s\n", __stringify(_name));		\
+}
+
+#define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops)		\
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{									\
+	struct config_item_type *cit = &tf->tf_##_name##_cit;		\
+	struct configfs_attribute **attrs = tf->tf_ops->tfc_##_name##_attrs; \
+									\
+	cit->ct_item_ops = _item_ops;					\
+	cit->ct_group_ops = _group_ops;					\
+	cit->ct_attrs = attrs;						\
+	cit->ct_owner = tf->tf_ops->module;				\
+	pr_debug("Setup generic %s\n", __stringify(_name));		\
+}
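+
+/*
+ * For example, TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops,
+ * NULL) below expands to target_fabric_setup_tpg_np_cit(), which fills
+ * in tf->tf_tpg_np_cit; the _DRV variant additionally pulls the
+ * attribute array from the fabric driver's target_core_fabric_ops.
+ */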
+
+/* Start of tfc_tpg_mappedlun_cit */
+
+static int target_fabric_mappedlun_link(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+			struct se_lun, lun_group);
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg;
+	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+	int lun_access;
+
+	if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
+		pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
+			" %p to struct lun: %p\n", lun_ci, lun);
+		return -EFAULT;
+	}
+	/*
+	 * Ensure that the source port exists
+	 */
+	if (!lun->lun_se_dev) {
+		pr_err("Source se_lun->lun_se_dev does not exist\n");
+		return -EINVAL;
+	}
+	if (lun->lun_shutdown) {
+		pr_err("Unable to create mappedlun symlink because"
+			" lun->lun_shutdown=true\n");
+		return -EINVAL;
+	}
+	se_tpg = lun->lun_tpg;
+
+	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+	tpg_ci = &nacl_ci->ci_group->cg_item;
+	wwn_ci = &tpg_ci->ci_group->cg_item;
+	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
+	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
+	/*
+	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
+	 */
+	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
+		pr_err("Illegal Initiator ACL SymLink outside of %s\n",
+			config_item_name(wwn_ci));
+		return -EINVAL;
+	}
+	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
+		pr_err("Illegal Initiator ACL Symlink outside of %s"
+			" TPGT: %s\n", config_item_name(wwn_ci),
+			config_item_name(tpg_ci));
+		return -EINVAL;
+	}
+	/*
+	 * If this struct se_node_acl was dynamically generated with
+	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
+	 * which will be write protected (READ-ONLY) when
+	 * tpg_1/attrib/demo_mode_write_protect=1
+	 */
+	rcu_read_lock();
+	deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
+	if (deve)
+		lun_access = deve->lun_flags;
+	else
+		lun_access =
+			(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
+				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
+					   TRANSPORT_LUNFLAGS_READ_WRITE;
+	rcu_read_unlock();
+	/*
+	 * Determine the actual mapped LUN value the user wants.
+	 *
+	 * This is the value the SCSI initiator actually sees for
+	 * $FABRIC/$WWPN/$TPGT/lun/lun_* on its initiator ports.
+	 */
+	return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
+}
+
+static int target_fabric_mappedlun_unlink(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+			struct se_lun, lun_group);
+
+	return core_dev_del_initiator_node_lun_acl(lun, lacl);
+}
+
+static struct se_lun_acl *item_to_lun_acl(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_lun_acl,
+			se_lun_group);
+}
+
+static ssize_t target_fabric_mappedlun_write_protect_show(
+		struct config_item *item, char *page)
+{
+	struct se_lun_acl *lacl = item_to_lun_acl(item);
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t len = 0;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
+	if (deve) {
+		len = sprintf(page, "%d\n",
+			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+	}
+	rcu_read_unlock();
+
+	return len;
+}
+
+static ssize_t target_fabric_mappedlun_write_protect_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	struct se_lun_acl *lacl = item_to_lun_acl(item);
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	unsigned long op;
+	int ret;
+
+	ret = kstrtoul(page, 0, &op);
+	if (ret)
+		return ret;
+
+	if ((op != 1) && (op != 0))
+		return -EINVAL;
+
+	core_update_device_list_access(lacl->mapped_lun, (op) ?
+			TRANSPORT_LUNFLAGS_READ_ONLY :
+			TRANSPORT_LUNFLAGS_READ_WRITE,
+			lacl->se_lun_nacl);
+
+	pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
+		" Mapped LUN: %llu Write Protect bit to %s\n",
+		se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+
+	return count;
+
+}
+
+CONFIGFS_ATTR(target_fabric_mappedlun_, write_protect);
+
+static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
+	&target_fabric_mappedlun_attr_write_protect,
+	NULL,
+};
+
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+				struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+	.release		= target_fabric_mappedlun_release,
+	.allow_link		= target_fabric_mappedlun_link,
+	.drop_link		= target_fabric_mappedlun_unlink,
+};
+
+TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
+		target_fabric_mappedlun_attrs);
+
+/* End of tfc_tpg_mappedlun_cit */
+
+/* Start of tfc_tpg_mappedlun_port_cit */
+
+static struct config_group *target_core_mappedlun_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_mappedlun_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = {
+	.make_group		= target_core_mappedlun_stat_mkdir,
+	.drop_item		= target_core_mappedlun_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops,
+		NULL);
+
+/* End of tfc_tpg_mappedlun_port_cit */
+
+TF_CIT_SETUP_DRV(tpg_nacl_attrib, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_auth, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_param, NULL, NULL);
+
+/* Start of tfc_tpg_nacl_base_cit */
+
+static struct config_group *target_fabric_make_mappedlun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl = container_of(group,
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_lun_acl *lacl = NULL;
+	struct config_item *acl_ci;
+	struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
+	char *buf;
+	unsigned long long mapped_lun;
+	int ret = 0;
+
+	acl_ci = &group->cg_item;
+	if (!acl_ci) {
+		pr_err("Unable to locatel acl_ci\n");
+		return NULL;
+	}
+
+	buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate memory for name buf\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	snprintf(buf, strlen(name) + 1, "%s", name);
+	/*
+	 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
+	 */
+	if (strstr(buf, "lun_") != buf) {
+		pr_err("Unable to locate \"lun_\" from buf: %s"
+			" name: %s\n", buf, name);
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * Determine the Mapped LUN value.  This is what the SCSI Initiator
+	 * Port will actually see.
+	 */
+	ret = kstrtoull(buf + 4, 0, &mapped_lun);
+	if (ret)
+		goto out;
+
+	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
+			mapped_lun, &ret);
+	if (!lacl) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	lacl_cg = &lacl->se_lun_group;
+	lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!lacl_cg->default_groups) {
+		pr_err("Unable to allocate lacl_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	config_group_init_type_name(&lacl->se_lun_group, name,
+			&tf->tf_tpg_mappedlun_cit);
+	config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
+			"statistics", &tf->tf_tpg_mappedlun_stat_cit);
+	lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
+	lacl_cg->default_groups[1] = NULL;
+
+	ml_stat_grp = &lacl->ml_stat_grps.stat_group;
+	ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
+				GFP_KERNEL);
+	if (!ml_stat_grp->default_groups) {
+		pr_err("Unable to allocate ml_stat_grp->default_groups\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	target_stat_setup_mappedlun_default_groups(lacl);
+
+	kfree(buf);
+	return &lacl->se_lun_group;
+out:
+	if (lacl_cg)
+		kfree(lacl_cg->default_groups);
+	kfree(lacl);
+	kfree(buf);
+	return ERR_PTR(ret);
+}
+
+static void target_fabric_drop_mappedlun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+			struct se_lun_acl, se_lun_group);
+	struct config_item *df_item;
+	struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
+	int i;
+
+	ml_stat_grp = &lacl->ml_stat_grps.stat_group;
+	for (i = 0; ml_stat_grp->default_groups[i]; i++) {
+		df_item = &ml_stat_grp->default_groups[i]->cg_item;
+		ml_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(ml_stat_grp->default_groups);
+
+	lacl_cg = &lacl->se_lun_group;
+	for (i = 0; lacl_cg->default_groups[i]; i++) {
+		df_item = &lacl_cg->default_groups[i]->cg_item;
+		lacl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(lacl_cg->default_groups);
+
+	config_item_put(item);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (tf->tf_ops->fabric_cleanup_nodeacl)
+		tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
+	core_tpg_del_initiator_node_acl(se_nacl);
+}
+
+static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+	.release		= target_fabric_nacl_base_release,
+};
+
+static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
+	.make_group		= target_fabric_make_mappedlun,
+	.drop_item		= target_fabric_drop_mappedlun,
+};
+
+TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+		&target_fabric_nacl_base_group_ops);
+
+/* End of tfc_tpg_nacl_base_cit */
+
+/* Start of tfc_node_fabric_stats_cit */
+/*
+ * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group
+ * to allow fabrics access to ->acl_fabric_stat_group->default_groups[]
+ */
+TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL);
+
+/* End of tfc_node_fabric_stats_cit */
+
+/* Start of tfc_tpg_nacl_cit */
+
+static struct config_group *target_fabric_make_nodeacl(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_acl_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_node_acl *se_nacl;
+	struct config_group *nacl_cg;
+
+	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name);
+	if (IS_ERR(se_nacl))
+		return ERR_CAST(se_nacl);
+
+	if (tf->tf_ops->fabric_init_nodeacl) {
+		int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+		if (ret) {
+			core_tpg_del_initiator_node_acl(se_nacl);
+			return ERR_PTR(ret);
+		}
+	}
+
+	nacl_cg = &se_nacl->acl_group;
+	nacl_cg->default_groups = se_nacl->acl_default_groups;
+	nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
+	nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
+	nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
+	nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
+	nacl_cg->default_groups[4] = NULL;
+
+	config_group_init_type_name(&se_nacl->acl_group, name,
+			&tf->tf_tpg_nacl_base_cit);
+	config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
+			&tf->tf_tpg_nacl_attrib_cit);
+	config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
+			&tf->tf_tpg_nacl_auth_cit);
+	config_group_init_type_name(&se_nacl->acl_param_group, "param",
+			&tf->tf_tpg_nacl_param_cit);
+	config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
+			"fabric_statistics", &tf->tf_tpg_nacl_stat_cit);
+
+	return &se_nacl->acl_group;
+}
+
+static void target_fabric_drop_nodeacl(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct config_item *df_item;
+	struct config_group *nacl_cg;
+	int i;
+
+	nacl_cg = &se_nacl->acl_group;
+	for (i = 0; nacl_cg->default_groups[i]; i++) {
+		df_item = &nacl_cg->default_groups[i]->cg_item;
+		nacl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	/*
+	 * struct se_node_acl free is done in target_fabric_nacl_base_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_nacl_group_ops = {
+	.make_group	= target_fabric_make_nodeacl,
+	.drop_item	= target_fabric_drop_nodeacl,
+};
+
+TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_cit */
+
+/* Start of tfc_tpg_np_base_cit */
+
+static void target_fabric_np_base_release(struct config_item *item)
+{
+	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+				struct se_tpg_np, tpg_np_group);
+	struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops->fabric_drop_np(se_tpg_np);
+}
+
+static struct configfs_item_operations target_fabric_np_base_item_ops = {
+	.release		= target_fabric_np_base_release,
+};
+
+TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL);
+
+/* End of tfc_tpg_np_base_cit */
+
+/* Start of tfc_tpg_np_cit */
+
+static struct config_group *target_fabric_make_np(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+				struct se_portal_group, tpg_np_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_tpg_np *se_tpg_np;
+
+	if (!tf->tf_ops->fabric_make_np) {
+		pr_err("tf->tf_ops.fabric_make_np is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg_np = tf->tf_ops->fabric_make_np(se_tpg, group, name);
+	if (!se_tpg_np || IS_ERR(se_tpg_np))
+		return ERR_PTR(-EINVAL);
+
+	se_tpg_np->tpg_np_parent = se_tpg;
+	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
+			&tf->tf_tpg_np_base_cit);
+
+	return &se_tpg_np->tpg_np_group;
+}
+
+static void target_fabric_drop_np(
+	struct config_group *group,
+	struct config_item *item)
+{
+	/*
+	 * struct se_tpg_np is released via target_fabric_np_base_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_np_group_ops = {
+	.make_group	= &target_fabric_make_np,
+	.drop_item	= &target_fabric_drop_np,
+};
+
+TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
+
+/* End of tfc_tpg_np_cit */
+
+/* Start of tfc_tpg_port_cit */
+
+static struct se_lun *item_to_lun(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_lun,
+			lun_group);
+}
+
+static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = item_to_lun(item);
+
+	if (!lun || !lun->lun_se_dev)
+		return -ENODEV;
+
+	return core_alua_show_tg_pt_gp_info(lun, page);
+}
+
+static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_lun *lun = item_to_lun(item);
+
+	if (!lun || !lun->lun_se_dev)
+		return -ENODEV;
+
+	return core_alua_store_tg_pt_gp_info(lun, page, count);
+}
+
+static ssize_t target_fabric_port_alua_tg_pt_offline_show(
+		struct config_item *item, char *page)
+{
+	struct se_lun *lun = item_to_lun(item);
+
+	if (!lun || !lun->lun_se_dev)
+		return -ENODEV;
+
+	return core_alua_show_offline_bit(lun, page);
+}
+
+static ssize_t target_fabric_port_alua_tg_pt_offline_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	struct se_lun *lun = item_to_lun(item);
+
+	if (!lun || !lun->lun_se_dev)
+		return -ENODEV;
+
+	return core_alua_store_offline_bit(lun, page, count);
+}
+
+static ssize_t target_fabric_port_alua_tg_pt_status_show(
+		struct config_item *item, char *page)
+{
+	struct se_lun *lun = item_to_lun(item);
+
+	if (!lun || !lun->lun_se_dev)
+		return -ENODEV;
+
+	return core_alua_show_secondary_status(lun, page);
+}
+
+static ssize_t target_fabric_port_alua_tg_pt_status_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	struct se_lun *lun = item_to_lun(item);
+
+	if (!lun || !lun->lun_se_dev)
+		return -ENODEV;
+
+	return core_alua_store_secondary_status(lun, page, count);
+}
+
+static ssize_t target_fabric_port_alua_tg_pt_write_md_show(
+		struct config_item *item, char *page)
+{
+	struct se_lun *lun = item_to_lun(item);
+
+	if (!lun || !lun->lun_se_dev)
+		return -ENODEV;
+
+	return core_alua_show_secondary_write_metadata(lun, page);
+}
+
+static ssize_t target_fabric_port_alua_tg_pt_write_md_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	struct se_lun *lun = item_to_lun(item);
+
+	if (!lun || !lun->lun_se_dev)
+		return -ENODEV;
+
+	return core_alua_store_secondary_write_metadata(lun, page, count);
+}
+
+CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_gp);
+CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_offline);
+CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_status);
+CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_write_md);
+
+static struct configfs_attribute *target_fabric_port_attrs[] = {
+	&target_fabric_port_attr_alua_tg_pt_gp,
+	&target_fabric_port_attr_alua_tg_pt_offline,
+	&target_fabric_port_attr_alua_tg_pt_status,
+	&target_fabric_port_attr_alua_tg_pt_write_md,
+	NULL,
+};
+
+static int target_fabric_port_link(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct config_item *tpg_ci;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_portal_group *se_tpg;
+	struct se_device *dev =
+		container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
+		pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
+			" %p to struct se_device: %p\n", se_dev_ci, dev);
+		return -EFAULT;
+	}
+
+	if (!(dev->dev_flags & DF_CONFIGURED)) {
+		pr_err("se_device not configured yet, cannot port link\n");
+		return -ENODEV;
+	}
+
+	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
+	se_tpg = container_of(to_config_group(tpg_ci),
+				struct se_portal_group, tpg_group);
+	tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (lun->lun_se_dev != NULL) {
+		pr_err("Port Symlink already exists\n");
+		return -EEXIST;
+	}
+
+	ret = core_dev_add_lun(se_tpg, dev, lun);
+	if (ret) {
+		pr_err("core_dev_add_lun() failed: %d\n", ret);
+		goto out;
+	}
+
+	if (tf->tf_ops->fabric_post_link) {
+		/*
+		 * Call the optional fabric_post_link() to allow a
+		 * fabric module to set up any additional state once
+		 * core_dev_add_lun() has been called.
+		 */
+		tf->tf_ops->fabric_post_link(se_tpg, lun);
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+static int target_fabric_port_unlink(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_portal_group *se_tpg = lun->lun_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (tf->tf_ops->fabric_pre_unlink) {
+		/*
+		 * Call the optional fabric_pre_unlink() to allow a
+		 * fabric module to release any additional state before
+		 * core_dev_del_lun() is called.
+		 */
+		tf->tf_ops->fabric_pre_unlink(se_tpg, lun);
+	}
+
+	core_dev_del_lun(se_tpg, lun);
+	return 0;
+}
+
+static void target_fabric_port_release(struct config_item *item)
+{
+	struct se_lun *lun = container_of(to_config_group(item),
+					  struct se_lun, lun_group);
+
+	kfree_rcu(lun, rcu_head);
+}
+
+static struct configfs_item_operations target_fabric_port_item_ops = {
+	.release		= target_fabric_port_release,
+	.allow_link		= target_fabric_port_link,
+	.drop_link		= target_fabric_port_unlink,
+};
+
+TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
+
+/* End of tfc_tpg_port_cit */
+
+/* Start of tfc_tpg_port_stat_cit */
+
+static struct config_group *target_core_port_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_port_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_fabric_port_stat_group_ops = {
+	.make_group		= target_core_port_stat_mkdir,
+	.drop_item		= target_core_port_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL);
+
+/* End of tfc_tpg_port_stat_cit */
+
+/* Start of tfc_tpg_lun_cit */
+
+static struct config_group *target_fabric_make_lun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_lun_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
+	unsigned long long unpacked_lun;
+	int errno;
+
+	if (strstr(name, "lun_") != name) {
+		pr_err("Unable to locate \'_\" in"
+				" \"lun_$LUN_NUMBER\"\n");
+		return ERR_PTR(-EINVAL);
+	}
+	errno = kstrtoull(name + 4, 0, &unpacked_lun);
+	if (errno)
+		return ERR_PTR(errno);
+
+	lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);
+	if (IS_ERR(lun))
+		return ERR_CAST(lun);
+
+	lun_cg = &lun->lun_group;
+	lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!lun_cg->default_groups) {
+		pr_err("Unable to allocate lun_cg->default_groups\n");
+		kfree(lun);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	config_group_init_type_name(&lun->lun_group, name,
+			&tf->tf_tpg_port_cit);
+	config_group_init_type_name(&lun->port_stat_grps.stat_group,
+			"statistics", &tf->tf_tpg_port_stat_cit);
+	lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
+	lun_cg->default_groups[1] = NULL;
+
+	port_stat_grp = &lun->port_stat_grps.stat_group;
+	port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
+				GFP_KERNEL);
+	if (!port_stat_grp->default_groups) {
+		pr_err("Unable to allocate port_stat_grp->default_groups\n");
+		kfree(lun_cg->default_groups);
+		kfree(lun);
+		return ERR_PTR(-ENOMEM);
+	}
+	target_stat_setup_port_default_groups(lun);
+
+	return &lun->lun_group;
+}
+
+static void target_fabric_drop_lun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_lun *lun = container_of(to_config_group(item),
+				struct se_lun, lun_group);
+	struct config_item *df_item;
+	struct config_group *lun_cg, *port_stat_grp;
+	int i;
+
+	port_stat_grp = &lun->port_stat_grps.stat_group;
+	for (i = 0; port_stat_grp->default_groups[i]; i++) {
+		df_item = &port_stat_grp->default_groups[i]->cg_item;
+		port_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(port_stat_grp->default_groups);
+
+	lun_cg = &lun->lun_group;
+	for (i = 0; lun_cg->default_groups[i]; i++) {
+		df_item = &lun_cg->default_groups[i]->cg_item;
+		lun_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(lun_cg->default_groups);
+
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_lun_group_ops = {
+	.make_group	= &target_fabric_make_lun,
+	.drop_item	= &target_fabric_drop_lun,
+};
+
+TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
+
+/* End of tfc_tpg_lun_cit */
+
+TF_CIT_SETUP_DRV(tpg_attrib, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_auth, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_param, NULL, NULL);
+
+/* Start of tfc_tpg_base_cit */
+
+static void target_fabric_tpg_release(struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+			struct se_portal_group, tpg_group);
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops->fabric_drop_tpg(se_tpg);
+}
+
+static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+	.release		= target_fabric_tpg_release,
+};
+
+TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL);
+
+/* End of tfc_tpg_base_cit */
+
+/* Start of tfc_tpg_cit */
+
+static struct config_group *target_fabric_make_tpg(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+	struct se_portal_group *se_tpg;
+
+	if (!tf->tf_ops->fabric_make_tpg) {
+		pr_err("tf->tf_ops->fabric_make_tpg is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
+	if (!se_tpg || IS_ERR(se_tpg))
+		return ERR_PTR(-EINVAL);
+	/*
+	 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
+	 */
+	se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
+	se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
+	se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
+	se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
+	se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
+	se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_auth_group;
+	se_tpg->tpg_group.default_groups[5] = &se_tpg->tpg_param_group;
+	se_tpg->tpg_group.default_groups[6] = NULL;
+
+	config_group_init_type_name(&se_tpg->tpg_group, name,
+			&tf->tf_tpg_base_cit);
+	config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
+			&tf->tf_tpg_lun_cit);
+	config_group_init_type_name(&se_tpg->tpg_np_group, "np",
+			&tf->tf_tpg_np_cit);
+	config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
+			&tf->tf_tpg_nacl_cit);
+	config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
+			&tf->tf_tpg_attrib_cit);
+	config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
+			&tf->tf_tpg_auth_cit);
+	config_group_init_type_name(&se_tpg->tpg_param_group, "param",
+			&tf->tf_tpg_param_cit);
+
+	return &se_tpg->tpg_group;
+}
+
+static void target_fabric_drop_tpg(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+				struct se_portal_group, tpg_group);
+	struct config_group *tpg_cg = &se_tpg->tpg_group;
+	struct config_item *df_item;
+	int i;
+	/*
+	 * Release default groups, but do not release tpg_cg->default_groups
+	 * memory as it is statically allocated at se_tpg->tpg_default_groups.
+	 */
+	for (i = 0; tpg_cg->default_groups[i]; i++) {
+		df_item = &tpg_cg->default_groups[i]->cg_item;
+		tpg_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+}
+
+static void target_fabric_release_wwn(struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops->fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+	.release	= target_fabric_release_wwn,
+};
+
+static struct configfs_group_operations target_fabric_tpg_group_ops = {
+	.make_group	= target_fabric_make_tpg,
+	.drop_item	= target_fabric_drop_tpg,
+};
+
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+		NULL);
+
+/* End of tfc_tpg_cit */
+
+/* Start of tfc_wwn_fabric_stats_cit */
+/*
+ * This is used as a placeholder for struct se_wwn->fabric_stat_group
+ * to allow fabrics access to ->fabric_stat_group->default_groups[]
+ */
+TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
+
+/* End of tfc_wwn_fabric_stats_cit */
+
+/* Start of tfc_wwn_cit */
+
+static struct config_group *target_fabric_make_wwn(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf = container_of(group,
+				struct target_fabric_configfs, tf_group);
+	struct se_wwn *wwn;
+
+	if (!tf->tf_ops->fabric_make_wwn) {
+		pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	wwn = tf->tf_ops->fabric_make_wwn(tf, group, name);
+	if (!wwn || IS_ERR(wwn))
+		return ERR_PTR(-EINVAL);
+
+	wwn->wwn_tf = tf;
+	/*
+	 * Setup default groups from pre-allocated wwn->wwn_default_groups
+	 */
+	wwn->wwn_group.default_groups = wwn->wwn_default_groups;
+	wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
+	wwn->wwn_group.default_groups[1] = NULL;
+
+	config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
+	config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
+			&tf->tf_wwn_fabric_stats_cit);
+
+	return &wwn->wwn_group;
+}
+
+static void target_fabric_drop_wwn(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+	struct config_item *df_item;
+	struct config_group *cg = &wwn->wwn_group;
+	int i;
+
+	for (i = 0; cg->default_groups[i]; i++) {
+		df_item = &cg->default_groups[i]->cg_item;
+		cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_wwn_group_ops = {
+	.make_group	= target_fabric_make_wwn,
+	.drop_item	= target_fabric_drop_wwn,
+};
+
+TF_CIT_SETUP_DRV(wwn, NULL, &target_fabric_wwn_group_ops);
+TF_CIT_SETUP_DRV(discovery, NULL, NULL);
+
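+/*
+ * Wire up every config_item_type a fabric driver needs.  The core
+ * calls this once when a fabric module registers its
+ * target_core_fabric_ops template, before any of the fabric's configfs
+ * directories exist.
+ */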
+int target_fabric_setup_cits(struct target_fabric_configfs *tf)
+{
+	target_fabric_setup_discovery_cit(tf);
+	target_fabric_setup_wwn_cit(tf);
+	target_fabric_setup_wwn_fabric_stats_cit(tf);
+	target_fabric_setup_tpg_cit(tf);
+	target_fabric_setup_tpg_base_cit(tf);
+	target_fabric_setup_tpg_port_cit(tf);
+	target_fabric_setup_tpg_port_stat_cit(tf);
+	target_fabric_setup_tpg_lun_cit(tf);
+	target_fabric_setup_tpg_np_cit(tf);
+	target_fabric_setup_tpg_np_base_cit(tf);
+	target_fabric_setup_tpg_attrib_cit(tf);
+	target_fabric_setup_tpg_auth_cit(tf);
+	target_fabric_setup_tpg_param_cit(tf);
+	target_fabric_setup_tpg_nacl_cit(tf);
+	target_fabric_setup_tpg_nacl_base_cit(tf);
+	target_fabric_setup_tpg_nacl_attrib_cit(tf);
+	target_fabric_setup_tpg_nacl_auth_cit(tf);
+	target_fabric_setup_tpg_nacl_param_cit(tf);
+	target_fabric_setup_tpg_nacl_stat_cit(tf);
+	target_fabric_setup_tpg_mappedlun_cit(tf);
+	target_fabric_setup_tpg_mappedlun_stat_cit(tf);
+
+	return 0;
+}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 0000000..cb6497c
--- /dev/null
+++ b/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,436 @@
+/*******************************************************************************
+ * Filename:  target_core_fabric_lib.c
+ *
+ * This file contains generic high level protocol identifier and PR
+ * handlers for TCM fabric modules
+ *
+ * (c) Copyright 2010-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+/*
+ * See SPC4, section 7.5 "Protocol specific parameters" for details
+ * on the formats implemented in this file.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <scsi/scsi_proto.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_pr.h"
+
+
+static int sas_get_pr_transport_id(
+	struct se_node_acl *nacl,
+	int *format_code,
+	unsigned char *buf)
+{
+	int ret;
+
+	/* Skip over the 'naa.' prefix */
+	ret = hex2bin(&buf[4], &nacl->initiatorname[4], 8);
+	if (ret) {
+		pr_debug("%s: invalid hex string\n", __func__);
+		return ret;
+	}
+
+	return 24;
+}
+
+static int fc_get_pr_transport_id(
+	struct se_node_acl *se_nacl,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char *ptr;
+	int i, ret;
+	u32 off = 8;
+
+	/*
+	 * We convert the ASCII formatted N Port name into a binary
+	 * encoded TransportID.
+	 */
+	ptr = &se_nacl->initiatorname[0];
+	for (i = 0; i < 24; ) {
+		if (!strncmp(&ptr[i], ":", 1)) {
+			i++;
+			continue;
+		}
+		ret = hex2bin(&buf[off++], &ptr[i], 1);
+		if (ret < 0) {
+			pr_debug("%s: invalid hex string\n", __func__);
+			return ret;
+		}
+		i += 2;
+	}
+	/*
+	 * The FC TransportID is hardcoded to a 24-byte length.
+	 */
+	return 24;
+}
+
+static int sbp_get_pr_transport_id(
+	struct se_node_acl *nacl,
+	int *format_code,
+	unsigned char *buf)
+{
+	int ret;
+
+	ret = hex2bin(&buf[8], nacl->initiatorname, 8);
+	if (ret) {
+		pr_debug("%s: invalid hex string\n", __func__);
+		return ret;
+	}
+
+	return 24;
+}
+
+static int srp_get_pr_transport_id(
+	struct se_node_acl *nacl,
+	int *format_code,
+	unsigned char *buf)
+{
+	const char *p;
+	unsigned len, count, leading_zero_bytes;
+	int rc;
+
+	p = nacl->initiatorname;
+	if (strncasecmp(p, "0x", 2) == 0)
+		p += 2;
+	len = strlen(p);
+	if (len % 2)
+		return -EINVAL;
+
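+	/*
+	 * SRP port identifiers are 16 bytes; shorter initiator names get
+	 * leading zero bytes, e.g. a 16-hex-digit name fills only the
+	 * last eight bytes of the field at buf[8..23].
+	 */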
+	count = min(len / 2, 16U);
+	leading_zero_bytes = 16 - count;
+	memset(buf + 8, 0, leading_zero_bytes);
+	rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
+	if (rc < 0) {
+		pr_debug("hex2bin failed for %s: %d\n", __func__, rc);
+		return rc;
+	}
+
+	return 24;
+}
+
+static int iscsi_get_pr_transport_id(
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	u32 off = 4, padding = 0;
+	u16 len = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+	 * ports using SCSI over iSCSI.
+	 *
+	 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
+	 * shall contain the iSCSI name of an iSCSI initiator node (see
+	 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
+	 * null character terminates the ISCSI NAME field without regard for
+	 * the specified length of the iSCSI TransportID or the contents of
+	 * the ADDITIONAL LENGTH field.
+	 */
+	len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
+	/*
+	 * Add Extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is an ISID present with the registration and
+	 * *format_code == 1, use the iSCSI Initiator port TransportID
+	 * format.
+	 *
+	 * Otherwise use the iSCSI Initiator device TransportID format,
+	 * which does not contain the ASCII encoded iSCSI Initiator ISID
+	 * value provided by the iSCSI Initiator during the iSCSI login
+	 * process.
+	 */
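+	/*
+	 * An illustrative port-format TransportID (hypothetical name and
+	 * ISID): "iqn.2003-01.org.example:host,i,0x123456789abc"
+	 */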
+	if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
+		/*
+		 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
+		 * format.
+		 */
+		buf[0] |= 0x40;
+		/*
+		 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+		 * ports using SCSI over iSCSI.  Table 390
+		 *
+		 * The SEPARATOR field shall contain the five ASCII
+		 * characters ",i,0x".
+		 *
+		 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
+		 * field shall contain the iSCSI initiator session identifier
+		 * (see RFC 3720) in the form of ASCII characters that are the
+		 * hexadecimal digits converted from the binary iSCSI initiator
+		 * session identifier value. The first ISCSI INITIATOR SESSION
+		 * ID field byte containing an ASCII null character
+		 * terminates the field without regard for the specified
+		 * length of the iSCSI TransportID.
+		 */
+		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+		buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
+		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+		buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
+		buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
+		len += 5;
+		buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
+		buf[off+len] = '\0'; off++;
+		len += 7;
+	}
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
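+	/*
+	 * (-len) & 3 rounds len up to the next multiple of four: e.g.
+	 * len = 21 gives padding = 3, so the padded len becomes 24.
+	 */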
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff);
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+
+static int iscsi_get_pr_transport_id_len(
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	u32 len = 0, padding = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	len = strlen(se_nacl->initiatorname);
+	/*
+	 * Add extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is ISID present with the registration, use format code:
+	 * 01b: iSCSI Initiator port TransportID format
+	 *
+	 * If there is not an active iSCSI session, use format code:
+	 * 00b: iSCSI Initiator device TransportID format
+	 */
+	if (pr_reg->isid_present_at_reg) {
+		len += 5; /* For ",i,0x" ASCII separator */
+		len += 7; /* For iSCSI Initiator Session ID + Null terminator */
+		*format_code = 1;
+	} else
+		*format_code = 0;
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+
+static char *iscsi_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	char *p;
+	u32 tid_len, padding;
+	int i;
+	u16 add_len;
+	u8 format_code = (buf[0] & 0xc0);
+	/*
+	 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
+	 *
+	 *       TransportID for initiator ports using SCSI over iSCSI,
+	 *       from Table 388 -- iSCSI TransportID formats.
+	 *
+	 *    00b     Initiator port is identified using the world wide unique
+	 *            SCSI device name of the iSCSI initiator
+	 *            device containing the initiator port (see table 389).
+	 *    01b     Initiator port is identified using the world wide unique
+	 *            initiator port identifier (see table 390).
+	 *    10b-11b Reserved
+	 */
+	if ((format_code != 0x00) && (format_code != 0x40)) {
+		pr_err("Illegal format code: 0x%02x for iSCSI"
+			" Initiator Transport ID\n", format_code);
+		return NULL;
+	}
+	/*
+	 * If the caller wants the TransportID Length, we set that value for the
+	 * entire iSCSI Transport ID now.
+	 */
+	if (out_tid_len) {
+		/* The shift works thanks to integer promotion rules */
+		add_len = (buf[2] << 8) | buf[3];
+
+		tid_len = strlen(&buf[4]);
+		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
+		tid_len += 1; /* Add one byte for NULL terminator */
+		padding = ((-tid_len) & 3);
+		if (padding != 0)
+			tid_len += padding;
+
+		if ((add_len + 4) != tid_len) {
+			pr_debug("LIO-Target Extracted add_len: %hu "
+				"does not match calculated tid_len: %u,"
+				" using tid_len instead\n", add_len+4, tid_len);
+			*out_tid_len = tid_len;
+		} else
+			*out_tid_len = (add_len + 4);
+	}
+	/*
+	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
+	 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
+	 * format.
+	 */
+	if (format_code == 0x40) {
+		p = strstr(&buf[4], ",i,0x");
+		if (!p) {
+			pr_err("Unable to locate \",i,0x\" separator"
+				" for Initiator port identifier: %s\n",
+				&buf[4]);
+			return NULL;
+		}
+		*p = '\0'; /* Terminate iSCSI Name */
+		p += 5; /* Skip over ",i,0x" separator */
+
+		*port_nexus_ptr = p;
+		/*
+		 * Go ahead and do the lower case conversion of the received
+		 * 12 ASCII characters representing the ISID in the TransportID
+		 * for comparison against the running iSCSI session's ISID from
+		 * iscsi_target.c:lio_sess_get_initiator_sid()
+		 */
+		for (i = 0; i < 12; i++) {
+			if (isdigit(*p)) {
+				p++;
+				continue;
+			}
+			*p = tolower(*p);
+			p++;
+		}
+	}
+
+	return (char *)&buf[4];
+}
+
+int target_get_pr_transport_id_len(struct se_node_acl *nacl,
+		struct t10_pr_registration *pr_reg, int *format_code)
+{
+	switch (nacl->se_tpg->proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	case SCSI_PROTOCOL_SBP:
+	case SCSI_PROTOCOL_SRP:
+	case SCSI_PROTOCOL_SAS:
+		break;
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id_len(nacl, pr_reg, format_code);
+	default:
+		pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+		return -EINVAL;
+	}
+
+	/*
+	 * Most transports use a fixed length 24 byte identifier.
+	 */
+	*format_code = 0;
+	return 24;
+}
+
+int target_get_pr_transport_id(struct se_node_acl *nacl,
+		struct t10_pr_registration *pr_reg, int *format_code,
+		unsigned char *buf)
+{
+	switch (nacl->se_tpg->proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id(nacl, format_code, buf);
+	case SCSI_PROTOCOL_SBP:
+		return sbp_get_pr_transport_id(nacl, format_code, buf);
+	case SCSI_PROTOCOL_SRP:
+		return srp_get_pr_transport_id(nacl, format_code, buf);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id(nacl, format_code, buf);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id(nacl, pr_reg, format_code,
+				buf);
+	default:
+		pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+		return -EINVAL;
+	}
+}
+
+const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+		const char *buf, u32 *out_tid_len, char **port_nexus_ptr)
+{
+	u32 offset;
+
+	switch (tpg->proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		/*
+		 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+		 * for initiator ports using SCSI over SAS Serial SCSI Protocol.
+		 */
+		offset = 4;
+		break;
+	case SCSI_PROTOCOL_SBP:
+	case SCSI_PROTOCOL_SRP:
+	case SCSI_PROTOCOL_FCP:
+		offset = 8;
+		break;
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	default:
+		pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id);
+		return NULL;
+	}
+
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+	return buf + offset;
+}
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
new file mode 100644
index 0000000..041a569
--- /dev/null
+++ b/drivers/target/target_core_file.c
@@ -0,0 +1,842 @@
+/*******************************************************************************
+ * Filename:  target_core_file.c
+ *
+ * This file contains the Storage Engine <-> FILEIO transport specific functions
+ *
+ * (c) Copyright 2005-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/falloc.h>
+#include <scsi/scsi_proto.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_file.h"
+
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct fd_dev, dev);
+}
+
+static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct fd_host *fd_host;
+
+	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
+	if (!fd_host) {
+		pr_err("Unable to allocate memory for struct fd_host\n");
+		return -ENOMEM;
+	}
+
+	fd_host->fd_host_id = host_id;
+
+	hba->hba_ptr = fd_host;
+
+	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+		TARGET_CORE_VERSION);
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+		hba->hba_id, fd_host->fd_host_id);
+
+	return 0;
+}
+
+static void fd_detach_hba(struct se_hba *hba)
+{
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
+
+	kfree(fd_host);
+	hba->hba_ptr = NULL;
+}
+
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
+{
+	struct fd_dev *fd_dev;
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
+	if (!fd_dev) {
+		pr_err("Unable to allocate memory for struct fd_dev\n");
+		return NULL;
+	}
+
+	fd_dev->fd_host = fd_host;
+
+	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
+
+	return &fd_dev->dev;
+}
+
+static int fd_configure_device(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct fd_host *fd_host = dev->se_hba->hba_ptr;
+	struct file *file;
+	struct inode *inode = NULL;
+	int flags, ret = -EINVAL;
+
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		pr_err("Missing fd_dev_name=\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+	 * of pure timestamp updates.
+	 */
+	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+
+	/*
+	 * Optionally allow fd_buffered_io=1 to be enabled for people
+	 * who want to use the fs buffer cache as a WriteCache mechanism.
+	 *
+	 * This means that in event of a hard failure, there is a risk
+	 * of silent data-loss if the SCSI client has *not* performed a
+	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
+	 * to write-out the entire device cache.
+	 */
+	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
+		flags &= ~O_DSYNC;
+	}
+
+	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
+	if (IS_ERR(file)) {
+		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
+		ret = PTR_ERR(file);
+		goto fail;
+	}
+	fd_dev->fd_file = file;
+	/*
+	 * If using a block backend with this struct file, we extract
+	 * fd_dev->fd_[block,dev]_size from struct block_device.
+	 *
+	 * Otherwise, we use the passed fd_size= from configfs
+	 */
+	inode = file->f_mapping->host;
+	if (S_ISBLK(inode->i_mode)) {
+		struct request_queue *q = bdev_get_queue(inode->i_bdev);
+		unsigned long long dev_size;
+
+		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+		/*
+		 * Determine the device size in bytes from i_size_read(),
+		 * minus one logical sector of the underlying struct
+		 * block_device.
+		 */
+		dev_size = (i_size_read(file->f_mapping->host) -
+				       fd_dev->fd_block_size);
+
+		pr_debug("FILEIO: Using size: %llu bytes from struct"
+			" block_device blocks: %llu logical_block_size: %d\n",
+			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
+			fd_dev->fd_block_size);
+
+		if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+			pr_debug("IFILE: BLOCK Discard support available,"
+				 " disabled by default\n");
+		/*
+		 * Enable WRITE SAME emulation and use 0xFFFF, as the smaller
+		 * WRITE_SAME(10) only has a two-byte block count.
+		 */
+		dev->dev_attrib.max_write_same_len = 0xFFFF;
+
+		if (blk_queue_nonrot(q))
+			dev->dev_attrib.is_nonrot = 1;
+	} else {
+		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
+			pr_err("FILEIO: Missing fd_dev_size="
+				" parameter, and no backing struct"
+				" block_device\n");
+			goto fail;
+		}
+
+		fd_dev->fd_block_size = FD_BLOCKSIZE;
+		/*
+		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
+		 */
+		dev->dev_attrib.max_unmap_lba_count = 0x2000;
+		/*
+		 * Currently hardcoded to 1 in Linux/SCSI code.
+		 */
+		dev->dev_attrib.max_unmap_block_desc_count = 1;
+		dev->dev_attrib.unmap_granularity = 1;
+		dev->dev_attrib.unmap_granularity_alignment = 0;
+
+		/*
+		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
+		 * based upon struct iovec limit for vfs_writev()
+		 */
+		dev->dev_attrib.max_write_same_len = 0x1000;
+	}
+
+	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
+	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
+	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+
+	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
+			" with FDBD_HAS_BUFFERED_IO_WCE\n");
+		dev->dev_attrib.emulate_write_cache = 1;
+	}
+
+	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+	fd_dev->fd_queue_depth = dev->queue_depth;
+
+	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
+			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
+
+	return 0;
+fail:
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+	return ret;
+}
+
+static void fd_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct fd_dev *fd_dev = FD_DEV(dev);
+
+	kfree(fd_dev);
+}
+
+static void fd_free_device(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
+}
+
+static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+		    u32 block_size, struct scatterlist *sgl,
+		    u32 sgl_nents, u32 data_length, int is_write)
+{
+	struct scatterlist *sg;
+	struct iov_iter iter;
+	struct bio_vec *bvec;
+	ssize_t len = 0;
+	loff_t pos = (cmd->t_task_lba * block_size);
+	int ret = 0, i;
+
+	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
+	if (!bvec) {
+		pr_err("Unable to allocate fd_do_readv iov[]\n");
+		return -ENOMEM;
+	}
+
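+	/*
+	 * Map each scatterlist entry onto a bio_vec so vfs_iter_read/write
+	 * can address the physical pages directly, without bounce buffers.
+	 */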
+	for_each_sg(sgl, sg, sgl_nents, i) {
+		bvec[i].bv_page = sg_page(sg);
+		bvec[i].bv_len = sg->length;
+		bvec[i].bv_offset = sg->offset;
+
+		len += sg->length;
+	}
+
+	iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
+	if (is_write)
+		ret = vfs_iter_write(fd, &iter, &pos);
+	else
+		ret = vfs_iter_read(fd, &iter, &pos);
+
+	kfree(bvec);
+
+	if (is_write) {
+		if (ret < 0 || ret != data_length) {
+			pr_err("%s() write returned %d\n", __func__, ret);
+			return (ret < 0 ? ret : -EINVAL);
+		}
+	} else {
+		/*
+		 * Return zeros and GOOD status even if the READ did not return
+		 * the expected virt_size for struct file w/o a backing struct
+		 * block_device.
+		 */
+		if (S_ISBLK(file_inode(fd)->i_mode)) {
+			if (ret < 0 || ret != data_length) {
+				pr_err("%s() returned %d, expecting %u for "
+						"S_ISBLK\n", __func__, ret,
+						data_length);
+				return (ret < 0 ? ret : -EINVAL);
+			}
+		} else {
+			if (ret < 0) {
+				pr_err("%s() returned %d for non S_ISBLK\n",
+						__func__, ret);
+				return ret;
+			}
+		}
+	}
+	return 1;
+}
+
+static sense_reason_t
+fd_execute_sync_cache(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
+	loff_t start, end;
+	int ret;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+
+	/*
+	 * Determine if we will be flushing the entire device.
+	 */
+	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
+		start = 0;
+		end = LLONG_MAX;
+	} else {
+		start = cmd->t_task_lba * dev->dev_attrib.block_size;
+		if (cmd->data_length)
+			end = start + cmd->data_length - 1;
+		else
+			end = LLONG_MAX;
+	}
+
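+	/*
+	 * The final argument (1) requests a datasync: flush the data range
+	 * but skip pure metadata (e.g. timestamp) updates.
+	 */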
+	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+	if (ret != 0)
+		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
+
+	if (immed)
+		return 0;
+
+	if (ret)
+		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+	else
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+
+	return 0;
+}
+
+static sense_reason_t
+fd_execute_write_same(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct fd_dev *fd_dev = FD_DEV(se_dev);
+	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+	struct iov_iter iter;
+	struct bio_vec *bvec;
+	unsigned int len = 0, i;
+	ssize_t ret;
+
+	if (!nolb) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+	if (cmd->prot_op) {
+		pr_err("WRITE_SAME: Protection information with FILEIO"
+		       " backends not supported\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	if (cmd->t_data_nents > 1 ||
+	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
+		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+			" block_size: %u\n",
+			cmd->t_data_nents,
+			cmd->t_data_sg[0].length,
+			cmd->se_dev->dev_attrib.block_size);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
+	if (!bvec)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
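+	/*
+	 * Point every bio_vec at the same single-block payload, so the one
+	 * vectored write below replicates that block nolb times.
+	 */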
+	for (i = 0; i < nolb; i++) {
+		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
+		bvec[i].bv_len = cmd->t_data_sg[0].length;
+		bvec[i].bv_offset = cmd->t_data_sg[0].offset;
+
+		len += se_dev->dev_attrib.block_size;
+	}
+
+	iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
+	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);
+
+	kfree(bvec);
+	if (ret < 0 || ret != len) {
+		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
+	return 0;
+}
+
+static int
+fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
+		void *buf, size_t bufsize)
+{
+	struct fd_dev *fd_dev = FD_DEV(se_dev);
+	struct file *prot_fd = fd_dev->fd_prot_file;
+	sector_t prot_length, prot;
+	loff_t pos = lba * se_dev->prot_length;
+
+	if (!prot_fd) {
+		pr_err("Unable to locate fd_dev->fd_prot_file\n");
+		return -ENODEV;
+	}
+
+	prot_length = nolb * se_dev->prot_length;
+
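+	/*
+	 * Write the PI pattern in bufsize-sized chunks until all
+	 * nolb * prot_length bytes have been covered.
+	 */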
+	for (prot = 0; prot < prot_length;) {
+		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
+		ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
+
+		if (ret != len) {
+			pr_err("vfs_write to prot file failed: %zd\n", ret);
+			return ret < 0 ? ret : -ENODEV;
+		}
+		prot += ret;
+	}
+
+	return 0;
+}
+
+static int
+fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+	void *buf;
+	int rc;
+
+	buf = (void *)__get_free_page(GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate FILEIO prot buf\n");
+		return -ENOMEM;
+	}
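+	/*
+	 * An 0xff fill marks the unmapped blocks' PI as invalid; per the
+	 * T10 DIF escape convention, all-ones tags disable verification.
+	 */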
+	memset(buf, 0xff, PAGE_SIZE);
+
+	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
+
+	free_page((unsigned long)buf);
+
+	return rc;
+}
+
+static sense_reason_t
+fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
+	struct inode *inode = file->f_mapping->host;
+	int ret;
+
+	if (cmd->se_dev->dev_attrib.pi_prot_type) {
+		ret = fd_do_prot_unmap(cmd, lba, nolb);
+		if (ret)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	if (S_ISBLK(inode->i_mode)) {
+		/* The backend is block device, use discard */
+		struct block_device *bdev = inode->i_bdev;
+		struct se_device *dev = cmd->se_dev;
+
+		ret = blkdev_issue_discard(bdev,
+					   target_to_linux_sector(dev, lba),
+					   target_to_linux_sector(dev,  nolb),
+					   GFP_KERNEL, 0);
+		if (ret < 0) {
+			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
+				ret);
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+	} else {
+		/* The backend is normal file, use fallocate */
+		struct se_device *se_dev = cmd->se_dev;
+		loff_t pos = lba * se_dev->dev_attrib.block_size;
+		unsigned int len = nolb * se_dev->dev_attrib.block_size;
+		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+
+		if (!file->f_op->fallocate)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+		ret = file->f_op->fallocate(file, mode, pos, len);
+		if (ret < 0) {
+			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+	}
+
+	return 0;
+}
+
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+	      enum dma_data_direction data_direction)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct file *file = fd_dev->fd_file;
+	struct file *pfile = fd_dev->fd_prot_file;
+	sense_reason_t rc;
+	int ret = 0;
+	/*
+	 * We are currently limited by the number of iovecs (2048) per
+	 * single vfs_[writev,readv] call.
+	 */
+	if (cmd->data_length > FD_MAX_BYTES) {
+		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+		       "FD_MAX_BYTES: %u iovec count limitiation\n",
+			cmd->data_length, FD_MAX_BYTES);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * Call vectorized fileio functions to map struct scatterlist
+	 * physical memory addresses to struct iovec virtual memory.
+	 */
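+	/*
+	 * For READs, protection information is fetched before the data and
+	 * verified afterwards; for WRITEs it is verified up front so bad
+	 * PI never reaches the backing file.
+	 */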
+	if (data_direction == DMA_FROM_DEVICE) {
+		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+			ret = fd_do_rw(cmd, pfile, dev->prot_length,
+				       cmd->t_prot_sg, cmd->t_prot_nents,
+				       cmd->prot_length, 0);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+
+		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+			       sgl, sgl_nents, cmd->data_length, 0);
+
+		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+			u32 sectors = cmd->data_length >>
+					ilog2(dev->dev_attrib.block_size);
+
+			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+					    0, cmd->t_prot_sg, 0);
+			if (rc)
+				return rc;
+		}
+	} else {
+		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+			u32 sectors = cmd->data_length >>
+					ilog2(dev->dev_attrib.block_size);
+
+			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+					    0, cmd->t_prot_sg, 0);
+			if (rc)
+				return rc;
+		}
+
+		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+			       sgl, sgl_nents, cmd->data_length, 1);
+		/*
+		 * Perform an implicit vfs_fsync_range() for fd_do_rw() write
+		 * ops when the SCSI WRITE has Force Unit Access (FUA) set.
+		 * Allow this to happen independent of the WCE=0 setting.
+		 */
+		if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
+			loff_t start = cmd->t_task_lba *
+				dev->dev_attrib.block_size;
+			loff_t end;
+
+			if (cmd->data_length)
+				end = start + cmd->data_length - 1;
+			else
+				end = LLONG_MAX;
+
+			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+		}
+
+		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+			ret = fd_do_rw(cmd, pfile, dev->prot_length,
+				       cmd->t_prot_sg, cmd->t_prot_nents,
+				       cmd->prot_length, 1);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+	}
+
+	if (ret < 0)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
+	return 0;
+}
+
+enum {
+	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_fd_dev_name, "fd_dev_name=%s"},
+	{Opt_fd_dev_size, "fd_dev_size=%s"},
+	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
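+	/*
+	 * Options arrive as a comma- or newline-separated key=value list
+	 * written through the configfs control path.
+	 */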
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_fd_dev_name:
+			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
+				FD_MAX_DEV_NAME) == 0) {
+				ret = -EINVAL;
+				break;
+			}
+			pr_debug("FILEIO: Referencing Path: %s\n",
+					fd_dev->fd_dev_name);
+			fd_dev->fbd_flags |= FBDF_HAS_PATH;
+			break;
+		case Opt_fd_dev_size:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
+			kfree(arg_p);
+			if (ret < 0) {
+				pr_err("kstrtoull() failed for"
+						" fd_dev_size=\n");
+				goto out;
+			}
+			pr_debug("FILEIO: Referencing Size: %llu"
+					" bytes\n", fd_dev->fd_dev_size);
+			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+			break;
+		case Opt_fd_buffered_io:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			if (arg != 1) {
+				pr_err("bogus fd_buffered_io=%d value\n", arg);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			pr_debug("FILEIO: Using buffered I/O"
+				" operations for struct fd_dev\n");
+
+			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	ssize_t bl = 0;
+
+	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
+		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
+		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
+		"Buffered-WCE" : "O_DSYNC");
+	return bl;
+}
+
+static sector_t fd_get_blocks(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct file *f = fd_dev->fd_file;
+	struct inode *i = f->f_mapping->host;
+	unsigned long long dev_size;
+	/*
+	 * When using a file that references an underlying struct block_device,
+	 * ensure dev_size is always based on the current inode size in order
+	 * to handle underlying block_device resize operations.
+	 */
+	if (S_ISBLK(i->i_mode))
+		dev_size = i_size_read(i);
+	else
+		dev_size = fd_dev->fd_dev_size;
+
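+	/*
+	 * ->get_blocks() must return the last addressable LBA, not the
+	 * block count, hence the one-block subtraction before dividing.
+	 */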
+	return div_u64(dev_size - dev->dev_attrib.block_size,
+		       dev->dev_attrib.block_size);
+}
+
+static int fd_init_prot(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct file *prot_file, *file = fd_dev->fd_file;
+	struct inode *inode;
+	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+	char buf[FD_MAX_DEV_PROT_NAME];
+
+	if (!file) {
+		pr_err("Unable to locate fd_dev->fd_file\n");
+		return -ENODEV;
+	}
+
+	inode = file->f_mapping->host;
+	if (S_ISBLK(inode->i_mode)) {
+		pr_err("FILEIO Protection emulation only supported on"
+		       " !S_ISBLK\n");
+		return -ENOSYS;
+	}
+
+	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
+		flags &= ~O_DSYNC;
+
+	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
+		 fd_dev->fd_dev_name);
+
+	prot_file = filp_open(buf, flags, 0600);
+	if (IS_ERR(prot_file)) {
+		pr_err("filp_open(%s) failed\n", buf);
+		ret = PTR_ERR(prot_file);
+		return ret;
+	}
+	fd_dev->fd_prot_file = prot_file;
+
+	return 0;
+}
+
+static int fd_format_prot(struct se_device *dev)
+{
+	unsigned char *buf;
+	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+	int ret;
+
+	if (!dev->dev_attrib.pi_prot_type) {
+		pr_err("Unable to format_prot while pi_prot_type == 0\n");
+		return -ENODEV;
+	}
+
+	buf = vzalloc(unit_size);
+	if (!buf) {
+		pr_err("Unable to allocate FILEIO prot buf\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("Using FILEIO prot_length: %llu\n",
+		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
+					dev->prot_length);
+
+	memset(buf, 0xff, unit_size);
+	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
+			      buf, unit_size);
+	vfree(buf);
+	return ret;
+}
+
+static void fd_free_prot(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+
+	if (!fd_dev->fd_prot_file)
+		return;
+
+	filp_close(fd_dev->fd_prot_file, NULL);
+	fd_dev->fd_prot_file = NULL;
+}
+
+static struct sbc_ops fd_sbc_ops = {
+	.execute_rw		= fd_execute_rw,
+	.execute_sync_cache	= fd_execute_sync_cache,
+	.execute_write_same	= fd_execute_write_same,
+	.execute_unmap		= fd_execute_unmap,
+};
+
+static sense_reason_t
+fd_parse_cdb(struct se_cmd *cmd)
+{
+	return sbc_parse_cdb(cmd, &fd_sbc_ops);
+}
+
+static const struct target_backend_ops fileio_ops = {
+	.name			= "fileio",
+	.inquiry_prod		= "FILEIO",
+	.inquiry_rev		= FD_VERSION,
+	.owner			= THIS_MODULE,
+	.attach_hba		= fd_attach_hba,
+	.detach_hba		= fd_detach_hba,
+	.alloc_device		= fd_alloc_device,
+	.configure_device	= fd_configure_device,
+	.free_device		= fd_free_device,
+	.parse_cdb		= fd_parse_cdb,
+	.set_configfs_dev_params = fd_set_configfs_dev_params,
+	.show_configfs_dev_params = fd_show_configfs_dev_params,
+	.get_device_type	= sbc_get_device_type,
+	.get_blocks		= fd_get_blocks,
+	.init_prot		= fd_init_prot,
+	.format_prot		= fd_format_prot,
+	.free_prot		= fd_free_prot,
+	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
+};
+
+static int __init fileio_module_init(void)
+{
+	return transport_backend_register(&fileio_ops);
+}
+
+static void __exit fileio_module_exit(void)
+{
+	target_backend_unregister(&fileio_ops);
+}
+
+MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(fileio_module_init);
+module_exit(fileio_module_exit);
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
new file mode 100644
index 0000000..068966f
--- /dev/null
+++ b/drivers/target/target_core_file.h
@@ -0,0 +1,48 @@
+#ifndef TARGET_CORE_FILE_H
+#define TARGET_CORE_FILE_H
+
+#define FD_VERSION		"4.0"
+
+#define FD_MAX_DEV_NAME		256
+#define FD_MAX_DEV_PROT_NAME	(FD_MAX_DEV_NAME + 16)
+#define FD_DEVICE_QUEUE_DEPTH	32
+#define FD_MAX_DEVICE_QUEUE_DEPTH 128
+#define FD_BLOCKSIZE		512
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES		8388608
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+#define FBDF_HAS_PATH		0x01
+#define FBDF_HAS_SIZE		0x02
+#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+#define FDBD_FORMAT_UNIT_SIZE	2048
+
+struct fd_dev {
+	struct se_device dev;
+
+	u32		fbd_flags;
+	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
+	/* Unique FILEIO Device ID in FILEIO HBA */
+	u32		fd_dev_id;
+	/* Number of SG tables in sg_table_array */
+	u32		fd_table_count;
+	u32		fd_queue_depth;
+	u32		fd_block_size;
+	unsigned long long fd_dev_size;
+	struct file	*fd_file;
+	struct file	*fd_prot_file;
+	/* FILEIO HBA device is connected to */
+	struct fd_host *fd_host;
+} ____cacheline_aligned;
+
+struct fd_host {
+	u32		fd_host_dev_id_count;
+	/* Unique FILEIO Host ID */
+	u32		fd_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_FILE_H */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
new file mode 100644
index 0000000..22390e0
--- /dev/null
+++ b/drivers/target/target_core_hba.c
@@ -0,0 +1,191 @@
+/*******************************************************************************
+ * Filename:  target_core_hba.c
+ *
+ * This file contains the TCM HBA Transport related functions.
+ *
+ * (c) Copyright 2003-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+
+static LIST_HEAD(backend_list);
+static DEFINE_MUTEX(backend_mutex);
+
+static u32 hba_id_counter;
+
+static DEFINE_SPINLOCK(hba_lock);
+static LIST_HEAD(hba_list);
+
+
+int transport_backend_register(const struct target_backend_ops *ops)
+{
+	struct target_backend *tb, *old;
+
+	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+	tb->ops = ops;
+
+	mutex_lock(&backend_mutex);
+	list_for_each_entry(old, &backend_list, list) {
+		if (!strcmp(old->ops->name, ops->name)) {
+			pr_err("backend %s already registered.\n", ops->name);
+			mutex_unlock(&backend_mutex);
+			kfree(tb);
+			return -EEXIST;
+		}
+	}
+	target_setup_backend_cits(tb);
+	list_add_tail(&tb->list, &backend_list);
+	mutex_unlock(&backend_mutex);
+
+	pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
+			ops->name, ops->owner);
+	return 0;
+}
+EXPORT_SYMBOL(transport_backend_register);
+
+void target_backend_unregister(const struct target_backend_ops *ops)
+{
+	struct target_backend *tb;
+
+	mutex_lock(&backend_mutex);
+	list_for_each_entry(tb, &backend_list, list) {
+		if (tb->ops == ops) {
+			list_del(&tb->list);
+			mutex_unlock(&backend_mutex);
+			/*
+			 * Wait for any outstanding backend driver ->rcu_head
+			 * callbacks to complete post TBO->free_device() ->
+			 * call_rcu(), before allowing backend driver module
+			 * unload of target_backend_ops->owner to proceed.
+			 */
+			rcu_barrier();
+			kfree(tb);
+			return;
+		}
+	}
+	mutex_unlock(&backend_mutex);
+}
+EXPORT_SYMBOL(target_backend_unregister);
+
+static struct target_backend *core_get_backend(const char *name)
+{
+	struct target_backend *tb;
+
+	mutex_lock(&backend_mutex);
+	list_for_each_entry(tb, &backend_list, list) {
+		if (!strcmp(tb->ops->name, name))
+			goto found;
+	}
+	mutex_unlock(&backend_mutex);
+	return NULL;
+found:
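+	/*
+	 * Pin the backend module before dropping backend_mutex so it
+	 * cannot be unloaded while an HBA holds a reference to it.
+	 */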
+	if (tb->ops->owner && !try_module_get(tb->ops->owner))
+		tb = NULL;
+	mutex_unlock(&backend_mutex);
+	return tb;
+}
+
+struct se_hba *
+core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
+{
+	struct se_hba *hba;
+	int ret = 0;
+
+	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+	if (!hba) {
+		pr_err("Unable to allocate struct se_hba\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&hba->device_lock);
+	mutex_init(&hba->hba_access_mutex);
+
+	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
+	hba->hba_flags |= hba_flags;
+
+	hba->backend = core_get_backend(plugin_name);
+	if (!hba->backend) {
+		ret = -EINVAL;
+		goto out_free_hba;
+	}
+
+	ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
+	if (ret < 0)
+		goto out_module_put;
+
+	spin_lock(&hba_lock);
+	hba->hba_id = hba_id_counter++;
+	list_add_tail(&hba->hba_node, &hba_list);
+	spin_unlock(&hba_lock);
+
+	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
+			" Core\n", hba->hba_id);
+
+	return hba;
+
+out_module_put:
+	module_put(hba->backend->ops->owner);
+	hba->backend = NULL;
+out_free_hba:
+	kfree(hba);
+	return ERR_PTR(ret);
+}
+
+int
+core_delete_hba(struct se_hba *hba)
+{
+	WARN_ON(hba->dev_count);
+
+	hba->backend->ops->detach_hba(hba);
+
+	spin_lock(&hba_lock);
+	list_del(&hba->hba_node);
+	spin_unlock(&hba_lock);
+
+	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
+			" Core\n", hba->hba_id);
+
+	module_put(hba->backend->ops->owner);
+
+	hba->backend = NULL;
+	kfree(hba);
+	return 0;
+}
+
+bool target_sense_desc_format(struct se_device *dev)
+{
+	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
+}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
new file mode 100644
index 0000000..4620c1d
--- /dev/null
+++ b/drivers/target/target_core_iblock.c
@@ -0,0 +1,845 @@
+/*******************************************************************************
+ * Filename:  target_core_iblock.c
+ *
+ * This file contains the Storage Engine  <-> Linux BlockIO transport
+ * specific functions.
+ *
+ * (c) Copyright 2003-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/bio.h>
+#include <linux/genhd.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <scsi/scsi_proto.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_iblock.h"
+
+#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
+#define IBLOCK_BIO_POOL_SIZE	128
+
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct iblock_dev, dev);
+}
+
+
+static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		IBLOCK_VERSION, TARGET_CORE_VERSION);
+	return 0;
+}
+
+static void iblock_detach_hba(struct se_hba *hba)
+{
+}
+
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
+{
+	struct iblock_dev *ib_dev = NULL;
+
+	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
+	if (!ib_dev) {
+		pr_err("Unable to allocate struct iblock_dev\n");
+		return NULL;
+	}
+
+	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
+
+	return &ib_dev->dev;
+}
+
+static int iblock_configure_device(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct request_queue *q;
+	struct block_device *bd = NULL;
+	struct blk_integrity *bi;
+	fmode_t mode;
+	int ret = -ENOMEM;
+
+	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		pr_err("Missing udev_path= parameters for IBLOCK\n");
+		return -EINVAL;
+	}
+
+	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
+	if (!ib_dev->ibd_bio_set) {
+		pr_err("IBLOCK: Unable to create bioset\n");
+		goto out;
+	}
+
+	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
+			ib_dev->ibd_udev_path);
+
+	mode = FMODE_READ|FMODE_EXCL;
+	if (!ib_dev->ibd_readonly)
+		mode |= FMODE_WRITE;
+	else
+		dev->dev_flags |= DF_READ_ONLY;
+
+	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
+	if (IS_ERR(bd)) {
+		ret = PTR_ERR(bd);
+		goto out_free_bioset;
+	}
+	ib_dev->ibd_bd = bd;
+
+	q = bdev_get_queue(bd);
+
+	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+	dev->dev_attrib.hw_queue_depth = q->nr_requests;
+
+	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+		pr_debug("IBLOCK: BLOCK Discard support available,"
+			 " disabled by default\n");
+
+	/*
+	 * Enable write same emulation for IBLOCK and use 0xFFFF as
+	 * the smaller WRITE_SAME(10) only has a two-byte block count.
+	 */
+	dev->dev_attrib.max_write_same_len = 0xFFFF;
+
+	if (blk_queue_nonrot(q))
+		dev->dev_attrib.is_nonrot = 1;
+
+	bi = bdev_get_integrity(bd);
+	if (bi) {
+		struct bio_set *bs = ib_dev->ibd_bio_set;
+
+		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
+		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
+			pr_err("IBLOCK export of blk_integrity: %s not"
+			       " supported\n", bi->profile->name);
+			ret = -ENOSYS;
+			goto out_blkdev_put;
+		}
+
+		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
+			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
+			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
+		}
+
+		if (dev->dev_attrib.pi_prot_type) {
+			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
+				pr_err("Unable to allocate bioset for PI\n");
+				ret = -ENOMEM;
+				goto out_blkdev_put;
+			}
+			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
+				 bs->bio_integrity_pool);
+		}
+		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
+	}
+
+	return 0;
+
+out_blkdev_put:
+	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+out_free_bioset:
+	bioset_free(ib_dev->ibd_bio_set);
+	ib_dev->ibd_bio_set = NULL;
+out:
+	return ret;
+}
+
+static void iblock_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+	kfree(ib_dev);
+}
+
+static void iblock_free_device(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+	if (ib_dev->ibd_bd != NULL)
+		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+	if (ib_dev->ibd_bio_set != NULL)
+		bioset_free(ib_dev->ibd_bio_set);
+
+	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
+}
+
+static unsigned long long iblock_emulate_read_cap_with_block_size(
+	struct se_device *dev,
+	struct block_device *bd,
+	struct request_queue *q)
+{
+	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
+					bdev_logical_block_size(bd)) - 1);
+	u32 block_size = bdev_logical_block_size(bd);
+
+	if (block_size == dev->dev_attrib.block_size)
+		return blocks_long;
+
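+	/*
+	 * The exported block size differs from the device's logical block
+	 * size; both are powers of two, so rescale the count with shifts.
+	 */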
+	switch (block_size) {
+	case 4096:
+		switch (dev->dev_attrib.block_size) {
+		case 2048:
+			blocks_long <<= 1;
+			break;
+		case 1024:
+			blocks_long <<= 2;
+			break;
+		case 512:
+			blocks_long <<= 3;
+		default:
+			break;
+		}
+		break;
+	case 2048:
+		switch (dev->dev_attrib.block_size) {
+		case 4096:
+			blocks_long >>= 1;
+			break;
+		case 1024:
+			blocks_long <<= 1;
+			break;
+		case 512:
+			blocks_long <<= 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 1024:
+		switch (dev->dev_attrib.block_size) {
+		case 4096:
+			blocks_long >>= 2;
+			break;
+		case 2048:
+			blocks_long >>= 1;
+			break;
+		case 512:
+			blocks_long <<= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 512:
+		switch (dev->dev_attrib.block_size) {
+		case 4096:
+			blocks_long >>= 3;
+			break;
+		case 2048:
+			blocks_long >>= 2;
+			break;
+		case 1024:
+			blocks_long >>= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return blocks_long;
+}
+
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr = cmd->priv;
+	u8 status;
+
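+	/*
+	 * Each bio completion drops a reference; only the final one may
+	 * report status back to the target core and free the request.
+	 */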
+	if (!atomic_dec_and_test(&ibr->pending))
+		return;
+
+	if (atomic_read(&ibr->ib_bio_err_cnt))
+		status = SAM_STAT_CHECK_CONDITION;
+	else
+		status = SAM_STAT_GOOD;
+
+	target_complete_cmd(cmd, status);
+	kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio)
+{
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_req *ibr = cmd->priv;
+
+	if (bio->bi_error) {
+		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_error);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic();
+	}
+
+	bio_put(bio);
+
+	iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	struct bio *bio;
+
+	/*
+	 * Only allocate as many vector entries as the bio code allows us to,
+	 * we'll loop later on until we have handled the whole request.
+	 */
+	if (sg_num > BIO_MAX_PAGES)
+		sg_num = BIO_MAX_PAGES;
+
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!bio) {
+		pr_err("Unable to allocate memory for bio\n");
+		return NULL;
+	}
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = cmd;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_iter.bi_sector = lba;
+
+	return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+	struct blk_plug plug;
+	struct bio *bio;
+
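+	/*
+	 * Plug the queue so the whole batch can be merged and dispatched
+	 * to the driver as a single unit.
+	 */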
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+}
+
+static void iblock_end_io_flush(struct bio *bio)
+{
+	struct se_cmd *cmd = bio->bi_private;
+
+	if (bio->bi_error)
+		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);
+
+	if (cmd) {
+		if (bio->bi_error)
+			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+		else
+			target_complete_cmd(cmd, SAM_STAT_GOOD);
+	}
+
+	bio_put(bio);
+}
+
+/*
+ * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
+ * always flush the whole cache.
+ */
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
+	struct bio *bio;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op.
+	 */
+	if (immed)
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+
+	bio = bio_alloc(GFP_KERNEL, 0);
+	bio->bi_end_io = iblock_end_io_flush;
+	bio->bi_bdev = ib_dev->ibd_bd;
+	if (!immed)
+		bio->bi_private = cmd;
+	submit_bio(WRITE_FLUSH, bio);
+	return 0;
+}
+
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+	struct se_device *dev = cmd->se_dev;
+	int ret;
+
+	ret = blkdev_issue_discard(bdev,
+				   target_to_linux_sector(dev, lba),
+				   target_to_linux_sector(dev,  nolb),
+				   GFP_KERNEL, 0);
+	if (ret < 0) {
+		pr_err("blkdev_issue_discard() failed: %d\n", ret);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	return 0;
+}
+
+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr;
+	struct scatterlist *sg;
+	struct bio *bio;
+	struct bio_list list;
+	struct se_device *dev = cmd->se_dev;
+	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+	sector_t sectors = target_to_linux_sector(dev,
+					sbc_get_write_same_sectors(cmd));
+
+	if (cmd->prot_op) {
+		pr_err("WRITE_SAME: Protection information with IBLOCK"
+		       " backends not supported\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	sg = &cmd->t_data_sg[0];
+
+	if (cmd->t_data_nents > 1 ||
+	    sg->length != cmd->se_dev->dev_attrib.block_size) {
+		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+			" block_size: %u\n", cmd->t_data_nents, sg->length,
+			cmd->se_dev->dev_attrib.block_size);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ibr)
+		goto fail;
+	cmd->priv = ibr;
+
+	bio = iblock_get_bio(cmd, block_lba, 1);
+	if (!bio)
+		goto fail_free_ibr;
+
+	bio_list_init(&list);
+	bio_list_add(&list, bio);
+
+	atomic_set(&ibr->pending, 1);
+
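+	/*
+	 * bio_add_page() returns the number of bytes added; once a bio is
+	 * full, chain a fresh one and retry the same block-sized payload.
+	 */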
+	while (sectors) {
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+
+			bio = iblock_get_bio(cmd, block_lba, 1);
+			if (!bio)
+				goto fail_put_bios;
+
+			atomic_inc(&ibr->pending);
+			bio_list_add(&list, bio);
+		}
+
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sectors -= 1;
+	}
+
+	iblock_submit_bios(&list, WRITE);
+	return 0;
+
+fail_put_bios:
+	while ((bio = bio_list_pop(&list)))
+		bio_put(bio);
+fail_free_ibr:
+	kfree(ibr);
+fail:
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
+enum {
+	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_udev_path, "udev_path=%s"},
+	{Opt_readonly, "readonly=%d"},
+	{Opt_force, "force=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, token;
+	unsigned long tmp_readonly;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_udev_path:
+			if (ib_dev->ibd_bd) {
+				pr_err("Unable to set udev_path= while"
+					" ib_dev->ibd_bd exists\n");
+				ret = -EEXIST;
+				goto out;
+			}
+			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
+				SE_UDEV_PATH_LEN) == 0) {
+				ret = -EINVAL;
+				break;
+			}
+			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
+					ib_dev->ibd_udev_path);
+			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+			break;
+		case Opt_readonly:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			ret = kstrtoul(arg_p, 0, &tmp_readonly);
+			kfree(arg_p);
+			if (ret < 0) {
+				pr_err("kstrtoul() failed for"
+						" readonly=\n");
+				goto out;
+			}
+			ib_dev->ibd_readonly = tmp_readonly;
+			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
+			break;
+		case Opt_force:
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	char buf[BDEVNAME_SIZE];
+	ssize_t bl = 0;
+
+	if (bd)
+		bl += sprintf(b + bl, "iBlock device: %s",
+				bdevname(bd, buf));
+	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
+		bl += sprintf(b + bl, "  UDEV PATH: %s",
+				ib_dev->ibd_udev_path);
+	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
+
+	bl += sprintf(b + bl, "        ");
+	if (bd) {
+		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
+			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
+			"" : (bd->bd_holder == ib_dev) ?
+			"CLAIMED: IBLOCK" : "CLAIMED: OS");
+	} else {
+		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
+	}
+
+	return bl;
+}
+
+static int
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct blk_integrity *bi;
+	struct bio_integrity_payload *bip;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct scatterlist *sg;
+	int i, rc;
+
+	bi = bdev_get_integrity(ib_dev->ibd_bd);
+	if (!bi) {
+		pr_err("Unable to locate bio_integrity\n");
+		return -ENODEV;
+	}
+
+	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+	if (!bip) {
+		pr_err("Unable to allocate bio_integrity_payload\n");
+		return -ENOMEM;
+	}
+
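+	/*
+	 * The integrity payload carries prot_length bytes of PI for every
+	 * logical block of data transferred by this command.
+	 */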
+	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
+			 dev->prot_length;
+	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+
+	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+		 (unsigned long long)bip->bip_iter.bi_sector);
+
+	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+
+		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
+					    sg->offset);
+		if (rc != sg->length) {
+			pr_err("bio_integrity_add_page() failed; %d\n", rc);
+			return -ENOMEM;
+		}
+
+		pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
+			 sg_page(sg), sg->length, sg->offset);
+	}
+
+	return 0;
+}
+
+static sense_reason_t
+iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+		  enum dma_data_direction data_direction)
+{
+	struct se_device *dev = cmd->se_dev;
+	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+	struct iblock_req *ibr;
+	struct bio *bio, *bio_start;
+	struct bio_list list;
+	struct scatterlist *sg;
+	u32 sg_num = sgl_nents;
+	unsigned bio_cnt;
+	int rw = 0;
+	int i;
+
+	if (data_direction == DMA_TO_DEVICE) {
+		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
+		/*
+		 * Force writethrough using WRITE_FUA if a volatile write cache
+		 * is not enabled, or if initiator set the Force Unit Access bit.
+		 */
+		if (q->flush_flags & REQ_FUA) {
+			if (cmd->se_cmd_flags & SCF_FUA)
+				rw = WRITE_FUA;
+			else if (!(q->flush_flags & REQ_FLUSH))
+				rw = WRITE_FUA;
+			else
+				rw = WRITE;
+		} else {
+			rw = WRITE;
+		}
+	} else {
+		rw = READ;
+	}
+
+	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ibr)
+		goto fail;
+	cmd->priv = ibr;
+
+	if (!sgl_nents) {
+		atomic_set(&ibr->pending, 1);
+		iblock_complete_cmd(cmd);
+		return 0;
+	}
+
+	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+	if (!bio)
+		goto fail_free_ibr;
+
+	bio_start = bio;
+	bio_list_init(&list);
+	bio_list_add(&list, bio);
+
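+	/*
+	 * Start pending at 2: one reference for the first bio, plus one
+	 * held by this submission path and dropped by the
+	 * iblock_complete_cmd() call below once all bios are queued.
+	 */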
+	atomic_set(&ibr->pending, 2);
+	bio_cnt = 1;
+
+	for_each_sg(sgl, sg, sgl_nents, i) {
+		/*
+		 * XXX: if the length the device accepts is shorter than the
+		 *	length of the S/G list entry this will cause an
+		 *	endless loop.  Better hope no driver uses huge pages.
+		 */
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
+				iblock_submit_bios(&list, rw);
+				bio_cnt = 0;
+			}
+
+			bio = iblock_get_bio(cmd, block_lba, sg_num);
+			if (!bio)
+				goto fail_put_bios;
+
+			atomic_inc(&ibr->pending);
+			bio_list_add(&list, bio);
+			bio_cnt++;
+		}
+
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sg_num--;
+	}
+
+	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+		int rc = iblock_alloc_bip(cmd, bio_start);
+		if (rc)
+			goto fail_put_bios;
+	}
+
+	iblock_submit_bios(&list, rw);
+	iblock_complete_cmd(cmd);
+	return 0;
+
+fail_put_bios:
+	while ((bio = bio_list_pop(&list)))
+		bio_put(bio);
+fail_free_ibr:
+	kfree(ibr);
+fail:
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
+static sector_t iblock_get_blocks(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+}
+
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	int ret;
+
+	ret = bdev_alignment_offset(bd);
+	if (ret == -1)
+		return 0;
+
+	/* convert offset-bytes to offset-lbas */
+	return ret / bdev_logical_block_size(bd);
+}
+
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+
+	return ilog2(logs_per_phys);
+}
+
+static unsigned int iblock_get_io_min(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+
+	return bdev_io_min(bd);
+}
+
+static unsigned int iblock_get_io_opt(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+
+	return bdev_io_opt(bd);
+}
+
+static struct sbc_ops iblock_sbc_ops = {
+	.execute_rw		= iblock_execute_rw,
+	.execute_sync_cache	= iblock_execute_sync_cache,
+	.execute_write_same	= iblock_execute_write_same,
+	.execute_unmap		= iblock_execute_unmap,
+};
+
+static sense_reason_t
+iblock_parse_cdb(struct se_cmd *cmd)
+{
+	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
+}
+
+static bool iblock_get_write_cache(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return q->flush_flags & REQ_FLUSH;
+}
+
+static const struct target_backend_ops iblock_ops = {
+	.name			= "iblock",
+	.inquiry_prod		= "IBLOCK",
+	.inquiry_rev		= IBLOCK_VERSION,
+	.owner			= THIS_MODULE,
+	.attach_hba		= iblock_attach_hba,
+	.detach_hba		= iblock_detach_hba,
+	.alloc_device		= iblock_alloc_device,
+	.configure_device	= iblock_configure_device,
+	.free_device		= iblock_free_device,
+	.parse_cdb		= iblock_parse_cdb,
+	.set_configfs_dev_params = iblock_set_configfs_dev_params,
+	.show_configfs_dev_params = iblock_show_configfs_dev_params,
+	.get_device_type	= sbc_get_device_type,
+	.get_blocks		= iblock_get_blocks,
+	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+	.get_lbppbe		= iblock_get_lbppbe,
+	.get_io_min		= iblock_get_io_min,
+	.get_io_opt		= iblock_get_io_opt,
+	.get_write_cache	= iblock_get_write_cache,
+	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
+};
+
+static int __init iblock_module_init(void)
+{
+	return transport_backend_register(&iblock_ops);
+}
+
+static void __exit iblock_module_exit(void)
+{
+	target_backend_unregister(&iblock_ops);
+}
+
+MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iblock_module_init);
+module_exit(iblock_module_exit);
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
new file mode 100644
index 0000000..01c2afd
--- /dev/null
+++ b/drivers/target/target_core_iblock.h
@@ -0,0 +1,25 @@
+#ifndef TARGET_CORE_IBLOCK_H
+#define TARGET_CORE_IBLOCK_H
+
+#define IBLOCK_VERSION		"4.0"
+
+#define IBLOCK_MAX_CDBS		16
+#define IBLOCK_LBA_SHIFT	9
+
+struct iblock_req {
+	atomic_t pending;
+	atomic_t ib_bio_err_cnt;
+} ____cacheline_aligned;
+
+#define IBDF_HAS_UDEV_PATH		0x01
+
+struct iblock_dev {
+	struct se_device dev;
+	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
+	u32	ibd_flags;
+	struct bio_set	*ibd_bio_set;
+	struct block_device *ibd_bd;
+	bool ibd_readonly;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
new file mode 100644
index 0000000..272e6f7
--- /dev/null
+++ b/drivers/target/target_core_internal.h
@@ -0,0 +1,161 @@
+#ifndef TARGET_CORE_INTERNAL_H
+#define TARGET_CORE_INTERNAL_H
+
+#define TARGET_CORE_NAME_MAX_LEN	64
+#define TARGET_FABRIC_NAME_SIZE		32
+
+struct target_backend {
+	struct list_head list;
+
+	const struct target_backend_ops *ops;
+
+	struct config_item_type tb_dev_cit;
+	struct config_item_type tb_dev_attrib_cit;
+	struct config_item_type tb_dev_pr_cit;
+	struct config_item_type tb_dev_wwn_cit;
+	struct config_item_type tb_dev_alua_tg_pt_gps_cit;
+	struct config_item_type tb_dev_stat_cit;
+};
+
+struct target_fabric_configfs {
+	atomic_t		tf_access_cnt;
+	struct list_head	tf_list;
+	struct config_group	tf_group;
+	struct config_group	tf_disc_group;
+	struct config_group	*tf_default_groups[2];
+	const struct target_core_fabric_ops *tf_ops;
+
+	struct config_item_type tf_discovery_cit;
+	struct config_item_type	tf_wwn_cit;
+	struct config_item_type tf_wwn_fabric_stats_cit;
+	struct config_item_type tf_tpg_cit;
+	struct config_item_type tf_tpg_base_cit;
+	struct config_item_type tf_tpg_lun_cit;
+	struct config_item_type tf_tpg_port_cit;
+	struct config_item_type tf_tpg_port_stat_cit;
+	struct config_item_type tf_tpg_np_cit;
+	struct config_item_type tf_tpg_np_base_cit;
+	struct config_item_type tf_tpg_attrib_cit;
+	struct config_item_type tf_tpg_auth_cit;
+	struct config_item_type tf_tpg_param_cit;
+	struct config_item_type tf_tpg_nacl_cit;
+	struct config_item_type tf_tpg_nacl_base_cit;
+	struct config_item_type tf_tpg_nacl_attrib_cit;
+	struct config_item_type tf_tpg_nacl_auth_cit;
+	struct config_item_type tf_tpg_nacl_param_cit;
+	struct config_item_type tf_tpg_nacl_stat_cit;
+	struct config_item_type tf_tpg_mappedlun_cit;
+	struct config_item_type tf_tpg_mappedlun_stat_cit;
+};
+
+/* target_core_alua.c */
+extern struct t10_alua_lu_gp *default_lu_gp;
+
+/* target_core_device.c */
+extern struct mutex g_device_mutex;
+extern struct list_head g_device_list;
+
+int	core_alloc_rtpi(struct se_lun *lun, struct se_device *dev);
+struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
+void	target_pr_kref_release(struct kref *);
+void	core_free_device_list_for_node(struct se_node_acl *,
+		struct se_portal_group *);
+void	core_update_device_list_access(u64, u32, struct se_node_acl *);
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
+int	core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+		u64, u32, struct se_node_acl *, struct se_portal_group *);
+void	core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
+		struct se_node_acl *, struct se_portal_group *);
+void	core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+int	core_dev_add_lun(struct se_portal_group *, struct se_device *,
+		struct se_lun *lun);
+void	core_dev_del_lun(struct se_portal_group *, struct se_lun *);
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_node_acl *, u64, int *);
+int	core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_lun_acl *, struct se_lun *lun, u32);
+int	core_dev_del_initiator_node_lun_acl(struct se_lun *,
+		struct se_lun_acl *);
+void	core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_lun_acl *lacl);
+int	core_dev_setup_virtual_lun0(void);
+void	core_dev_release_virtual_lun0(void);
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
+int	target_configure_device(struct se_device *dev);
+void	target_free_device(struct se_device *);
+
+/* target_core_configfs.c */
+void	target_setup_backend_cits(struct target_backend *);
+
+/* target_core_fabric_configfs.c */
+int	target_fabric_setup_cits(struct target_fabric_configfs *);
+
+/* target_core_fabric_lib.c */
+int	target_get_pr_transport_id_len(struct se_node_acl *nacl,
+		struct t10_pr_registration *pr_reg, int *format_code);
+int	target_get_pr_transport_id(struct se_node_acl *nacl,
+		struct t10_pr_registration *pr_reg, int *format_code,
+		unsigned char *buf);
+const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+		const char *buf, u32 *out_tid_len, char **port_nexus_ptr);
+
+/* target_core_hba.c */
+struct se_hba *core_alloc_hba(const char *, u32, u32);
+int	core_delete_hba(struct se_hba *);
+
+/* target_core_tmr.c */
+void	core_tmr_abort_task(struct se_device *, struct se_tmr_req *,
+			struct se_session *);
+int	core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+		struct list_head *, struct se_cmd *);
+
+/* target_core_tpg.c */
+extern struct se_device *g_lun0_dev;
+
+struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+		const char *);
+void	core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
+				  struct se_lun *);
+void	core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
+int	core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
+		u32, struct se_device *);
+void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
+struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
+		const char *initiatorname);
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl);
+
+/* target_core_transport.c */
+extern struct kmem_cache *se_tmr_req_cache;
+
+int	init_se_kmem_caches(void);
+void	release_se_kmem_caches(void);
+u32	scsi_get_new_index(scsi_index_t);
+void	transport_subsystem_check_init(void);
+int	transport_cmd_finish_abort(struct se_cmd *, int);
+unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+void	transport_dump_dev_state(struct se_device *, char *, int *);
+void	transport_dump_dev_info(struct se_device *, struct se_lun *,
+		unsigned long long, char *, int *);
+void	transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
+bool	target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
+void	transport_clear_lun_ref(struct se_lun *);
+void	transport_send_task_abort(struct se_cmd *);
+sense_reason_t	target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+void	target_qf_do_work(struct work_struct *work);
+bool	target_check_wce(struct se_device *dev);
+bool	target_check_fua(struct se_device *dev);
+void	__target_execute_cmd(struct se_cmd *, bool);
+
+/* target_core_stat.c */
+void	target_stat_setup_dev_default_groups(struct se_device *);
+void	target_stat_setup_port_default_groups(struct se_lun *);
+void	target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+
+/* target_core_xcopy.c */
+extern struct se_portal_group xcopy_pt_tpg;
+
+#endif /* TARGET_CORE_INTERNAL_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
new file mode 100644
index 0000000..e793311
--- /dev/null
+++ b/drivers/target/target_core_pr.c
@@ -0,0 +1,4167 @@
+/*******************************************************************************
+ * Filename:  target_core_pr.c
+ *
+ * This file contains SPC-3 compliant persistent reservations and
+ * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
+ *
+ * (c) Copyright 2009-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/file.h>
+#include <scsi/scsi_proto.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+/*
+ * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
+ */
+struct pr_transport_id_holder {
+	struct t10_pr_registration *dest_pr_reg;
+	struct se_portal_group *dest_tpg;
+	struct se_node_acl *dest_node_acl;
+	struct se_dev_entry *dest_se_deve;
+	struct list_head dest_list;
+};
+
+void core_pr_dump_initiator_port(
+	struct t10_pr_registration *pr_reg,
+	char *buf,
+	u32 size)
+{
+	if (!pr_reg->isid_present_at_reg)
+		buf[0] = '\0';
+
+	snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
+}
+
+enum register_type {
+	REGISTER,
+	REGISTER_AND_IGNORE_EXISTING_KEY,
+	REGISTER_AND_MOVE,
+};
+
+enum preempt_type {
+	PREEMPT,
+	PREEMPT_AND_ABORT,
+};
+
+static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
+					      struct t10_pr_registration *, int, int);
+
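+/*
+ * A registration holds the reservation either directly, or, for the
+ * all-registrants types, by virtue of being registered at all.
+ */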
+static int is_reservation_holder(
+	struct t10_pr_registration *pr_res_holder,
+	struct t10_pr_registration *pr_reg)
+{
+	int pr_res_type;
+
+	if (pr_res_holder) {
+		pr_res_type = pr_res_holder->pr_res_type;
+
+		return pr_res_holder == pr_reg ||
+		       pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+		       pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG;
+	}
+	return 0;
+}
+
+static sense_reason_t
+target_scsi2_reservation_check(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+
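+	/*
+	 * Per SPC-2, INQUIRY and RELEASE must not fail with RESERVATION
+	 * CONFLICT, even when issued by an initiator that does not hold
+	 * the reservation.
+	 */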
+	switch (cmd->t_task_cdb[0]) {
+	case INQUIRY:
+	case RELEASE:
+	case RELEASE_10:
+		return 0;
+	default:
+		break;
+	}
+
+	if (!dev->dev_reserved_node_acl || !sess)
+		return 0;
+
+	if (dev->dev_reserved_node_acl != sess->se_node_acl)
+		return TCM_RESERVATION_CONFLICT;
+
+	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+		if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+			return TCM_RESERVATION_CONFLICT;
+	}
+
+	return 0;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
+					struct se_node_acl *, struct se_session *);
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
+
+static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_device *dev = cmd->se_dev;
+	struct t10_pr_registration *pr_reg;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	int conflict = 0;
+
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+			se_sess);
+	if (pr_reg) {
+		/*
+		 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
+		 * behavior
+		 *
+		 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
+		 * status, but no reservation shall be established and the
+		 * persistent reservation shall not be changed, if the command
+		 * is received from a) and b) below.
+		 *
+		 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
+		 * status, but the persistent reservation shall not be released,
+		 * if the command is received from a) and b)
+		 *
+		 * a) An I_T nexus that is a persistent reservation holder; or
+		 * b) An I_T nexus that is registered if a registrants only or
+		 *    all registrants type persistent reservation is present.
+		 *
+		 * In all other cases, a RESERVE(6) command, RESERVE(10) command,
+		 * RELEASE(6) command, or RELEASE(10) command shall be processed
+		 * as defined in SPC-2.
+		 */
+		if (pr_reg->pr_res_holder) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 1;
+		}
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 1;
+		}
+		core_scsi3_put_pr_reg(pr_reg);
+		conflict = 1;
+	} else {
+		/*
+		 * Following spc2r20 5.5.1 Reservations overview:
+		 *
+		 * If a logical unit has executed a PERSISTENT RESERVE OUT
+		 * command with the REGISTER or the REGISTER AND IGNORE
+		 * EXISTING KEY service action and is still registered by any
+		 * initiator, all RESERVE commands and all RELEASE commands
+		 * regardless of initiator shall conflict and shall terminate
+		 * with a RESERVATION CONFLICT status.
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+
+	if (conflict) {
+		pr_err("Received legacy SPC-2 RESERVE/RELEASE"
+			" while active SPC-3 registrations exist,"
+			" returning RESERVATION_CONFLICT\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+sense_reason_t
+target_scsi2_reservation_release(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg;
+	int rc;
+
+	if (!sess || !sess->se_tpg)
+		goto out;
+	rc = target_check_scsi2_reservation_conflict(cmd);
+	if (rc == 1)
+		goto out;
+	if (rc < 0)
+		return TCM_RESERVATION_CONFLICT;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess)
+		goto out_unlock;
+
+	if (dev->dev_reserved_node_acl != sess->se_node_acl)
+		goto out_unlock;
+
+	if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+		goto out_unlock;
+
+	dev->dev_reserved_node_acl = NULL;
+	dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+		dev->dev_res_bin_isid = 0;
+		dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	tpg = sess->se_tpg;
+	pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
+		" MAPPED LUN: %llu for %s\n",
+		tpg->se_tpg_tfo->get_fabric_name(),
+		cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
+		sess->se_node_acl->initiatorname);
+
+out_unlock:
+	spin_unlock(&dev->dev_reservation_lock);
+out:
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+sense_reason_t
+target_scsi2_reservation_reserve(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg;
+	sense_reason_t ret = 0;
+	int rc;
+
+	if ((cmd->t_task_cdb[1] & 0x01) &&
+	    (cmd->t_task_cdb[1] & 0x02)) {
+		pr_err("LongIO and Obselete Bits set, returning"
+				" ILLEGAL_REQUEST\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+	/*
+	 * This is currently the case for target_core_mod passthrough struct se_cmd
+	 * ops
+	 */
+	if (!sess || !sess->se_tpg)
+		goto out;
+	rc = target_check_scsi2_reservation_conflict(cmd);
+	if (rc == 1)
+		goto out;
+
+	if (rc < 0)
+		return TCM_RESERVATION_CONFLICT;
+
+	tpg = sess->se_tpg;
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_reserved_node_acl &&
+	   (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+		pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+			tpg->se_tpg_tfo->get_fabric_name());
+		pr_err("Original reserver LUN: %llu %s\n",
+			cmd->se_lun->unpacked_lun,
+			dev->dev_reserved_node_acl->initiatorname);
+		pr_err("Current attempt - LUN: %llu -> MAPPED LUN: %llu"
+			" from %s \n", cmd->se_lun->unpacked_lun,
+			cmd->orig_fe_lun,
+			sess->se_node_acl->initiatorname);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out_unlock;
+	}
+
+	dev->dev_reserved_node_acl = sess->se_node_acl;
+	dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
+	if (sess->sess_bin_isid != 0) {
+		dev->dev_res_bin_isid = sess->sess_bin_isid;
+		dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu"
+		" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
+		sess->se_node_acl->initiatorname);
+
+out_unlock:
+	spin_unlock(&dev->dev_reservation_lock);
+out:
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
+	return ret;
+}
+
+
+/*
+ * Begin SPC-3/SPC-4 Persistent Reservations emulation support
+ *
+ * This function is called by those initiator ports who are *NOT*
+ * the active PR reservation holder when a reservation is present.
+ */
+static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
+					bool isid_mismatch)
+{
+	unsigned char *cdb = cmd->t_task_cdb;
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_node_acl *nacl = se_sess->se_node_acl;
+	int other_cdb = 0;
+	int registered_nexus = 0, ret = 1; /* Conflict by default */
+	int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
+	int we = 0; /* Write Exclusive */
+	int legacy = 0; /* Act like a legacy device and return
+			 * RESERVATION CONFLICT on some CDBs */
+
+	if (isid_mismatch) {
+		registered_nexus = 0;
+	} else {
+		struct se_dev_entry *se_deve;
+
+		rcu_read_lock();
+		se_deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+		if (se_deve)
+			registered_nexus = test_bit(DEF_PR_REG_ACTIVE,
+						    &se_deve->deve_flags);
+		rcu_read_unlock();
+	}
+
+	switch (pr_reg_type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		we = 1;
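+		/* fall through */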
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		/*
+		 * Some commands are only allowed for the persistent reservation
+		 * holder.
+		 */
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		we = 1;
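+		/* fall through */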
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		/*
+		 * Some commands are only allowed for registered I_T Nexuses.
+		 */
+		reg_only = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		we = 1;
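+		/* fall through */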
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		/*
+		 * Each registered I_T Nexus is a reservation holder.
+		 */
+		all_reg = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/*
+	 * Referenced from spc4r17 table 45 for *NON* PR holder access
+	 */
+	switch (cdb[0]) {
+	case SECURITY_PROTOCOL_IN:
+		if (registered_nexus)
+			return 0;
+		ret = (we) ? 0 : 1;
+		break;
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case READ_ATTRIBUTE:
+	case READ_BUFFER:
+	case RECEIVE_DIAGNOSTIC:
+		if (legacy) {
+			ret = 1;
+			break;
+		}
+		if (registered_nexus) {
+			ret = 0;
+			break;
+		}
+		ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+		break;
+	case PERSISTENT_RESERVE_OUT:
+		/*
+		 * This follows PERSISTENT_RESERVE_OUT service actions that
+		 * are allowed in the presence of various reservations.
+		 * See spc4r17, table 46
+		 */
+		switch (cdb[1] & 0x1f) {
+		case PRO_CLEAR:
+		case PRO_PREEMPT:
+		case PRO_PREEMPT_AND_ABORT:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		case PRO_REGISTER:
+		case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+			ret = 0;
+			break;
+		case PRO_REGISTER_AND_MOVE:
+		case PRO_RESERVE:
+			ret = 1;
+			break;
+		case PRO_RELEASE:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		default:
+			pr_err("Unknown PERSISTENT_RESERVE_OUT service"
+				" action: 0x%02x\n", cdb[1] & 0x1f);
+			return -EINVAL;
+		}
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/* Handled by CRH=1 in target_scsi2_reservation_release() */
+		ret = 0;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/* Handled by CRH=1 in target_scsi2_reservation_reserve() */
+		ret = 0;
+		break;
+	case TEST_UNIT_READY:
+		ret = (legacy) ? 1 : 0; /* Conflict for legacy */
+		break;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_MANAGEMENT_PROTOCOL_IN:
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_SUPPORTED_OPERATION_CODES:
+		case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+			if (legacy) {
+				ret = 1;
+				break;
+			}
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_ALIASES:
+		case MI_REPORT_IDENTIFYING_INFORMATION:
+		case MI_REPORT_PRIORITY:
+		case MI_REPORT_TARGET_PGS:
+		case MI_REPORT_TIMESTAMP:
+			ret = 0; /* Allowed */
+			break;
+		default:
+			pr_err("Unknown MI Service Action: 0x%02x\n",
+				(cdb[1] & 0x1f));
+			return -EINVAL;
+		}
+		break;
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case INQUIRY:
+	case LOG_SENSE:
+	case SERVICE_ACTION_IN_12:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+	case PERSISTENT_RESERVE_IN:
+		ret = 0; /* Allowed CDBs */
+		break;
+	default:
+		other_cdb = 1;
+		break;
+	}
+	/*
+	 * Case where the CDB is explicitly allowed in the above switch
+	 * statement.
+	 */
+	if (!ret && !other_cdb) {
+		pr_debug("Allowing explicit CDB: 0x%02x for %s"
+			" reservation holder\n", cdb[0],
+			core_scsi3_pr_dump_type(pr_reg_type));
+
+		return ret;
+	}
+	/*
+	 * Check write exclusive handling for initiator ports that are *NOT*
+	 * holding the WRITE_EXCLUSIVE_* reservation.
+	 */
+	if (we && !registered_nexus) {
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			/*
+			 * Conflict for write exclusive
+			 */
+			pr_debug("%s Conflict for unregistered nexus"
+				" %s CDB: 0x%02x to %s reservation\n",
+				transport_dump_cmd_direction(cmd),
+				se_sess->se_node_acl->initiatorname, cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+			return 1;
+		} else {
+			/*
+			 * Allow non WRITE CDBs for all Write Exclusive
+			 * PR TYPEs to pass for registered and
+			 * non-registered nexuses NOT holding the reservation.
+			 *
+			 * We only make noise for the unregistered nexuses,
+			 * as we expect registered non-reservation holding
+			 * nexuses to issue CDBs.
+			 */
+
+			if (!registered_nexus) {
+				pr_debug("Allowing implicit CDB: 0x%02x"
+					" for %s reservation on unregistered"
+					" nexus\n", cdb[0],
+					core_scsi3_pr_dump_type(pr_reg_type));
+			}
+
+			return 0;
+		}
+	} else if ((reg_only) || (all_reg)) {
+		if (registered_nexus) {
+			/*
+			 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
+			 * allow commands from registered nexuses.
+			 */
+
+			pr_debug("Allowing implicit CDB: 0x%02x for %s"
+				" reservation\n", cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+
+			return 0;
+		}
+	} else if (we && registered_nexus) {
+		/*
+		 * Reads are allowed for Write Exclusive locks
+		 * from all registrants.
+		 */
+		if (cmd->data_direction == DMA_FROM_DEVICE) {
+			pr_debug("Allowing READ CDB: 0x%02x for %s"
+				" reservation\n", cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+
+			return 0;
+		}
+	}
+	pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+		" for %s reservation\n", transport_dump_cmd_direction(cmd),
+		(registered_nexus) ? "" : "un",
+		se_sess->se_node_acl->initiatorname, cdb[0],
+		core_scsi3_pr_dump_type(pr_reg_type));
+
+	return 1; /* Conflict by default */
+}
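+
+/*
+ * A few illustrative outcomes of the checks above, following spc4r17
+ * table 45 as implemented here (examples only, not an exhaustive list):
+ *
+ *	WRITE EXCLUSIVE, unregistered nexus, WRITE(10)	-> conflict
+ *	WRITE EXCLUSIVE, unregistered nexus, READ(10)	-> allowed
+ *	EXCLUSIVE ACCESS REGONLY, registered nexus	-> allowed
+ *	EXCLUSIVE ACCESS, unregistered nexus, INQUIRY	-> allowed
+ */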
+
+static sense_reason_t
+target_scsi3_pr_reservation_check(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	u32 pr_reg_type;
+	bool isid_mismatch = false;
+
+	if (!dev->dev_pr_res_holder)
+		return 0;
+
+	pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+	cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+	if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl)
+		goto check_nonholder;
+
+	if (dev->dev_pr_res_holder->isid_present_at_reg) {
+		if (dev->dev_pr_res_holder->pr_reg_bin_isid !=
+		    sess->sess_bin_isid) {
+			isid_mismatch = true;
+			goto check_nonholder;
+		}
+	}
+
+	return 0;
+
+check_nonholder:
+	if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type, isid_mismatch))
+		return TCM_RESERVATION_CONFLICT;
+	return 0;
+}
+
+static u32 core_scsi3_pr_generation(struct se_device *dev)
+{
+	u32 prg;
+
+	/*
+	 * PRGeneration field shall contain the value of a 32-bit wrapping
+	 * counter maintained by the device server.
+	 *
+	 * Note that this is done regardless of Active Persist across
+	 * Target PowerLoss (APTPL)
+	 *
+	 * See spc4r17 section 6.3.12 READ_KEYS service action
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	prg = dev->t10_pr.pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return prg;
+}
+
+static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_lun *lun,
+	struct se_dev_entry *dest_deve,
+	u64 mapped_lun,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
+	if (!pr_reg) {
+		pr_err("Unable to allocate struct t10_pr_registration\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = nacl;
+	/*
+	 * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1,
+	 * the se_dev_entry->pr_ref will have been already obtained by
+	 * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration().
+	 *
+	 * Otherwise, locate se_dev_entry now and obtain a reference until
+	 * registration completes in __core_scsi3_add_registration().
+	 */
+	if (dest_deve) {
+		pr_reg->pr_reg_deve = dest_deve;
+	} else {
+		rcu_read_lock();
+		pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+		if (!pr_reg->pr_reg_deve) {
+			rcu_read_unlock();
+			pr_err("Unable to locate PR deve %s mapped_lun: %llu\n",
+				nacl->initiatorname, mapped_lun);
+			kmem_cache_free(t10_pr_reg_cache, pr_reg);
+			return NULL;
+		}
+		kref_get(&pr_reg->pr_reg_deve->pr_kref);
+		rcu_read_unlock();
+	}
+	pr_reg->pr_res_mapped_lun = mapped_lun;
+	pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
+	pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = aptpl;
+	/*
+	 * If an ISID value for this SCSI Initiator Port exists,
+	 * save it to the registration now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+
+	return pr_reg;
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
+
+/*
+ * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
+ * modes.
+ */
+static struct t10_pr_registration *__core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_lun *lun,
+	struct se_dev_entry *deve,
+	u64 mapped_lun,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_dev_entry *deve_tmp;
+	struct se_node_acl *nacl_tmp;
+	struct se_lun_acl *lacl_tmp;
+	struct se_lun *lun_tmp, *next, *dest_lun;
+	const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
+	int ret;
+	/*
+	 * Create a registration for the I_T Nexus upon which the
+	 * PROUT REGISTER was received.
+	 */
+	pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, lun, deve, mapped_lun,
+						    isid, sa_res_key, all_tg_pt,
+						    aptpl);
+	if (!pr_reg)
+		return NULL;
+	/*
+	 * Return pointer to pr_reg for ALL_TG_PT=0
+	 */
+	if (!all_tg_pt)
+		return pr_reg;
+	/*
+	 * Create list of matching SCSI Initiator Port registrations
+	 * for ALL_TG_PT=1
+	 */
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) {
+		if (!percpu_ref_tryget_live(&lun_tmp->lun_ref))
+			continue;
+		spin_unlock(&dev->se_port_lock);
+
+		spin_lock(&lun_tmp->lun_deve_lock);
+		list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) {
+			/*
+			 * This pointer will be NULL for demo mode MappedLUNs
+			 * that have not been made explicit via a ConfigFS
+			 * MappedLUN group for the SCSI Initiator Node ACL.
+			 */
+			if (!deve_tmp->se_lun_acl)
+				continue;
+
+			lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
+						lockdep_is_held(&lun_tmp->lun_deve_lock));
+			nacl_tmp = lacl_tmp->se_lun_nacl;
+			/*
+			 * Skip the matching struct se_node_acl that is allocated
+			 * above..
+			 */
+			if (nacl == nacl_tmp)
+				continue;
+			/*
+			 * Only perform PR registrations for target ports on
+			 * the same fabric module as the REGISTER w/ ALL_TG_PT=1
+			 * arrived.
+			 */
+			if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
+				continue;
+			/*
+			 * Look for a matching Initiator Node ACL in ASCII format
+			 */
+			if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
+				continue;
+
+			kref_get(&deve_tmp->pr_kref);
+			spin_unlock(&lun_tmp->lun_deve_lock);
+			/*
+			 * Grab a configfs group dependency that is released
+			 * for the exception path at label out: below, or upon
+			 * completion of adding ALL_TG_PT=1 registrations in
+			 * __core_scsi3_add_registration()
+			 */
+			ret = core_scsi3_lunacl_depend_item(deve_tmp);
+			if (ret < 0) {
+				pr_err("core_scsi3_lunacl_depend"
+						"_item() failed\n");
+				percpu_ref_put(&lun_tmp->lun_ref);
+				kref_put(&deve_tmp->pr_kref, target_pr_kref_release);
+				goto out;
+			}
+			/*
+			 * Located a matching SCSI Initiator Port on a different
+			 * port, allocate the pr_reg_atp and attach it to the
+			 * pr_reg->pr_reg_atp_list that will be processed once
+			 * the original *pr_reg is processed in
+			 * __core_scsi3_add_registration()
+			 */
+			dest_lun = rcu_dereference_check(deve_tmp->se_lun,
+				atomic_read(&deve_tmp->pr_kref.refcount) != 0);
+
+			pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
+						nacl_tmp, dest_lun, deve_tmp,
+						deve_tmp->mapped_lun, NULL,
+						sa_res_key, all_tg_pt, aptpl);
+			if (!pr_reg_atp) {
+				percpu_ref_put(&lun_tmp->lun_ref);
+				core_scsi3_lunacl_undepend_item(deve_tmp);
+				goto out;
+			}
+
+			list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
+				      &pr_reg->pr_reg_atp_list);
+			spin_lock(&lun_tmp->lun_deve_lock);
+		}
+		spin_unlock(&lun_tmp->lun_deve_lock);
+
+		spin_lock(&dev->se_port_lock);
+		percpu_ref_put(&lun_tmp->lun_ref);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return pr_reg;
+out:
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+	}
+	kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	return NULL;
+}
+
+int core_scsi3_alloc_aptpl_registration(
+	struct t10_reservation *pr_tmpl,
+	u64 sa_res_key,
+	unsigned char *i_port,
+	unsigned char *isid,
+	u64 mapped_lun,
+	unsigned char *t_port,
+	u16 tpgt,
+	u64 target_lun,
+	int res_holder,
+	int all_tg_pt,
+	u8 type)
+{
+	struct t10_pr_registration *pr_reg;
+
+	if (!i_port || !t_port || !sa_res_key) {
+		pr_err("Illegal parameters for APTPL registration\n");
+		return -EINVAL;
+	}
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
+	if (!pr_reg) {
+		pr_err("Unable to allocate struct t10_pr_registration\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = NULL;
+	pr_reg->pr_reg_deve = NULL;
+	pr_reg->pr_res_mapped_lun = mapped_lun;
+	pr_reg->pr_aptpl_target_lun = target_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = 1;
+	pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
+	pr_reg->pr_res_type = type;
+	/*
+	 * If an ISID value had been saved in APTPL metadata for this
+	 * SCSI Initiator Port, restore it now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+	/*
+	 * Copy the i_port and t_port information from caller.
+	 */
+	snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
+	snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
+	pr_reg->pr_reg_tpgt = tpgt;
+	/*
+	 * Set pr_res_holder from the caller; the pr_reg that is the reservation
+	 * holder will get its pointer set in core_scsi3_aptpl_reserve() once
+	 * the Initiator Node LUN ACL from the fabric module is created for
+	 * this registration.
+	 */
+	pr_reg->pr_res_holder = res_holder;
+
+	list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
+	pr_debug("SPC-3 PR APTPL Successfully added registration%s from"
+			" metadata\n", (res_holder) ? "+reservation" : "");
+	return 0;
+}
+
+static void core_scsi3_aptpl_reserve(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_node_acl *node_acl,
+	struct t10_pr_registration *pr_reg)
+{
+	char i_buf[PR_REG_ISID_ID_LEN];
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	dev->dev_pr_res_holder = pr_reg;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+		" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tpg->se_tpg_tfo->get_fabric_name(),
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+		tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
+		i_buf);
+}
+
+static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
+				struct t10_pr_registration *, enum register_type, int);
+
+static int __core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u64 target_lun,
+	struct se_node_acl *nacl,
+	u64 mapped_lun)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
+	unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+	u16 tpgt;
+
+	memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
+	memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
+	/*
+	 * Copy Initiator Port information from struct se_node_acl
+	 */
+	snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
+	snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+	tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
+	/*
+	 * Look for the matching registrations+reservation from those
+	 * created from APTPL metadata.  Note that multiple registrations
+	 * may exist for fabrics that use ISIDs in their SCSI Initiator Port
+	 * TransportIDs.
+	 */
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+
+		if (!strcmp(pr_reg->pr_iport, i_port) &&
+		     (pr_reg->pr_res_mapped_lun == mapped_lun) &&
+		    !(strcmp(pr_reg->pr_tport, t_port)) &&
+		     (pr_reg->pr_reg_tpgt == tpgt) &&
+		     (pr_reg->pr_aptpl_target_lun == target_lun)) {
+			/*
+			 * Obtain the ->pr_reg_deve pointer + reference, that
+			 * is released by __core_scsi3_add_registration() below.
+			 */
+			rcu_read_lock();
+			pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+			if (!pr_reg->pr_reg_deve) {
+				pr_err("Unable to locate PR APTPL %s mapped_lun:"
+					" %llu\n", nacl->initiatorname, mapped_lun);
+				rcu_read_unlock();
+				continue;
+			}
+			kref_get(&pr_reg->pr_reg_deve->pr_kref);
+			rcu_read_unlock();
+
+			pr_reg->pr_reg_nacl = nacl;
+			pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
+			list_del(&pr_reg->pr_reg_aptpl_list);
+			spin_unlock(&pr_tmpl->aptpl_reg_lock);
+			/*
+			 * At this point all of the pointers in *pr_reg will
+			 * be setup, so go ahead and add the registration.
+			 */
+			__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
+			/*
+			 * If this registration is the reservation holder,
+			 * make that happen now..
+			 */
+			if (pr_reg->pr_res_holder)
+				core_scsi3_aptpl_reserve(dev, tpg,
+						nacl, pr_reg);
+			/*
+			 * Reenable pr_aptpl_active to accept new metadata
+			 * updates once the SCSI device is active again..
+			 */
+			spin_lock(&pr_tmpl->aptpl_reg_lock);
+			pr_tmpl->pr_aptpl_active = 1;
+		}
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+
+	return 0;
+}
+
+int core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_node_acl *nacl,
+	u64 mapped_lun)
+{
+	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+		return 0;
+
+	return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
+						     lun->unpacked_lun, nacl,
+						     mapped_lun);
+}
+
+static void __core_scsi3_dump_registration(
+	const struct target_core_fabric_ops *tfo,
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	enum register_type register_type)
+{
+	struct se_portal_group *se_tpg = nacl->se_tpg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+
+	memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+
+	pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(), (register_type == REGISTER_AND_MOVE) ?
+		"_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ?
+		"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
+		i_buf);
+	pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+		 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+		tfo->tpg_get_tag(se_tpg));
+	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n",  tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		dev->transport->name);
+	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x  APTPL: %d\n", tfo->get_fabric_name(),
+		pr_reg->pr_res_key, pr_reg->pr_res_generation,
+		pr_reg->pr_reg_aptpl);
+}
+
+static void __core_scsi3_add_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	enum register_type register_type,
+	int register_move)
+{
+	const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	struct se_dev_entry *deve;
+
+	/*
+	 * Increment PRgeneration counter for struct se_device upon a successful
+	 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
+	 *
+	 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
+	 * action, the struct se_device->dev_reservation_lock will already be held,
+	 * so we do not call core_scsi3_pr_generation() which grabs the lock
+	 * for the REGISTER.
+	 */
+	pr_reg->pr_res_generation = (register_move) ?
+			dev->t10_pr.pr_generation++ :
+			core_scsi3_pr_generation(dev);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
+
+	__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
+	 */
+	if (!pr_reg->pr_reg_all_tg_pt || register_move)
+		goto out;
+	/*
+	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
+	 * allocated in __core_scsi3_alloc_registration()
+	 */
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		struct se_node_acl *nacl_tmp = pr_reg_tmp->pr_reg_nacl;
+
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+
+		pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		list_add_tail(&pr_reg_tmp->pr_reg_list,
+			      &pr_tmpl->registration_list);
+
+		__core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
+					       register_type);
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Drop configfs group dependency reference and deve->pr_kref
+		 * obtained from __core_scsi3_alloc_registration() code.
+		 */
+		rcu_read_lock();
+		deve = pr_reg_tmp->pr_reg_deve;
+		if (deve) {
+			set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+			core_scsi3_lunacl_undepend_item(deve);
+			pr_reg_tmp->pr_reg_deve = NULL;
+		}
+		rcu_read_unlock();
+	}
+out:
+	/*
+	 * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration()
+	 */
+	rcu_read_lock();
+	deve = pr_reg->pr_reg_deve;
+	if (deve) {
+		set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+		kref_put(&deve->pr_kref, target_pr_kref_release);
+		pr_reg->pr_reg_deve = NULL;
+	}
+	rcu_read_unlock();
+}
+
+static int core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_lun *lun,
+	struct se_dev_entry *deve,
+	u64 mapped_lun,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl,
+	enum register_type register_type,
+	int register_move)
+{
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = __core_scsi3_alloc_registration(dev, nacl, lun, deve, mapped_lun,
+						 isid, sa_res_key, all_tg_pt,
+						 aptpl);
+	if (!pr_reg)
+		return -EPERM;
+
+	__core_scsi3_add_registration(dev, nacl, pr_reg,
+			register_type, register_move);
+	return 0;
+}
+
+static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	unsigned char *isid)
+{
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct se_portal_group *tpg;
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+		/*
+		 * First look for a matching struct se_node_acl
+		 */
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		/*
+		 * If this registration does NOT contain a fabric provided
+		 * ISID, then we have found a match.
+		 */
+		if (!pr_reg->isid_present_at_reg) {
+			/*
+			 * Determine if this SCSI device server requires that
+			 * SCSI Initiator TransportIDs w/ ISIDs are enforced
+			 * for fabric modules (iSCSI) requiring them.
+			 */
+			if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+				if (dev->dev_attrib.enforce_pr_isids)
+					continue;
+			}
+			atomic_inc_mb(&pr_reg->pr_res_holders);
+			spin_unlock(&pr_tmpl->registration_lock);
+			return pr_reg;
+		}
+		/*
+		 * If the *pr_reg contains a fabric defined ISID for multi-value
+		 * SCSI Initiator Port TransportIDs, then we expect a valid
+		 * matching ISID to be provided by the local SCSI Initiator Port.
+		 */
+		if (!isid)
+			continue;
+		if (strcmp(isid, pr_reg->pr_reg_isid))
+			continue;
+
+		atomic_inc_mb(&pr_reg->pr_res_holders);
+		spin_unlock(&pr_tmpl->registration_lock);
+		return pr_reg;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	return NULL;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_session *sess)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+	unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+
+	if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+		memset(&buf[0], 0, PR_REG_ISID_LEN);
+		tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
+					PR_REG_ISID_LEN);
+		isid_ptr = &buf[0];
+	}
+
+	return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
+}
+
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
+{
+	atomic_dec_mb(&pr_reg->pr_res_holders);
+}
+
+static int core_scsi3_check_implicit_release(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct t10_pr_registration *pr_res_holder;
+	int ret = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!pr_res_holder) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return ret;
+	}
+	if (pr_res_holder == pr_reg) {
+		/*
+		 * Perform an implicit RELEASE if the registration that
+		 * is being released is holding the reservation.
+		 *
+		 * From spc4r17, section 5.7.11.1:
+		 *
+		 * e) If the I_T nexus is the persistent reservation holder
+		 *    and the persistent reservation is not an all registrants
+		 *    type, then a PERSISTENT RESERVE OUT command with REGISTER
+		 *    service action or REGISTER AND  IGNORE EXISTING KEY
+		 *    service action with the SERVICE ACTION RESERVATION KEY
+		 *    field set to zero (see 5.7.11.3).
+		 */
+		__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
+		ret = 1;
+		/*
+		 * For 'All Registrants' reservation types, all existing
+		 * registrations are still processed as reservation holders
+		 * in core_scsi3_pr_seq_non_holder() after the initial
+		 * reservation holder is implicitly released here.
+		 */
+	} else if (pr_reg->pr_reg_all_tg_pt &&
+		  (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
+			  pr_reg->pr_reg_nacl->initiatorname)) &&
+		  (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
+		pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1"
+			" UNREGISTER while existing reservation with matching"
+			" key 0x%016Lx is present from another SCSI Initiator"
+			" Port\n", pr_reg->pr_res_key);
+		ret = -EPERM;
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct t10_reservation->registration_lock held.
+ */
+static void __core_scsi3_free_registration(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int dec_holders)
+	__releases(&pr_tmpl->registration_lock)
+	__acquires(&pr_tmpl->registration_lock)
+{
+	const struct target_core_fabric_ops *tfo =
+			pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct se_dev_entry *deve;
+	char i_buf[PR_REG_ISID_ID_LEN];
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+
+	if (!list_empty(&pr_reg->pr_reg_list))
+		list_del(&pr_reg->pr_reg_list);
+	/*
+	 * The caller accessed *pr_reg via core_scsi3_locate_pr_reg(),
+	 * so call core_scsi3_put_pr_reg() to decrement our reference.
+	 */
+	if (dec_holders)
+		core_scsi3_put_pr_reg(pr_reg);
+
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Wait until all references from any other I_T nexuses for this
+	 * *pr_reg have been released.  Because list_del() is called above,
+	 * the last core_scsi3_put_pr_reg(pr_reg) will drop the reference
+	 * count back to zero, and we release *pr_reg.
+	 */
+	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
+		pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
+				tfo->get_fabric_name());
+		cpu_relax();
+	}
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, pr_reg->pr_res_mapped_lun);
+	if (deve)
+		clear_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+	rcu_read_unlock();
+
+	spin_lock(&pr_tmpl->registration_lock);
+	pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(),
+		pr_reg->pr_reg_nacl->initiatorname,
+		i_buf);
+	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n", tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		dev->transport->name);
+	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+		pr_reg->pr_res_generation);
+
+	if (!preempt_and_abort_list) {
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return;
+	}
+	/*
+	 * For PREEMPT_AND_ABORT, the list of *pr_reg in preempt_and_abort_list
+	 * are released once the ABORT_TASK_SET has completed..
+	 */
+	list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
+}
+
+void core_scsi3_free_pr_reg_from_nacl(
+	struct se_device *dev,
+	struct se_node_acl *nacl)
+{
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+	bool free_reg = false;
+	/*
+	 * If the passed se_node_acl matches the reservation holder,
+	 * release the reservation.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder != NULL) &&
+	    (pr_res_holder->pr_reg_nacl == nacl)) {
+		__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
+		free_reg = true;
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Release any registration associated with the struct se_node_acl.
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	if (pr_res_holder && free_reg)
+		__core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+}
+
+void core_scsi3_free_all_registrations(
+	struct se_device *dev)
+{
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder != NULL) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+						  pr_res_holder, 0, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		list_del(&pr_reg->pr_reg_aptpl_list);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+}
+
+static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
+{
+	return target_depend_item(&tpg->tpg_group.cg_item);
+}
+
+static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
+{
+	target_undepend_item(&tpg->tpg_group.cg_item);
+	atomic_dec_mb(&tpg->tpg_pr_ref_count);
+}
+
+static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
+{
+	if (nacl->dynamic_node_acl)
+		return 0;
+	return target_depend_item(&nacl->acl_group.cg_item);
+}
+
+static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
+{
+	if (!nacl->dynamic_node_acl)
+		target_undepend_item(&nacl->acl_group.cg_item);
+	atomic_dec_mb(&nacl->acl_pr_ref_count);
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
+				atomic_read(&se_deve->pr_kref.refcount) != 0);
+	if (!lun_acl)
+		return 0;
+
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	return target_depend_item(&lun_acl->se_lun_group.cg_item);
+}
+
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
+				atomic_read(&se_deve->pr_kref.refcount) != 0);
+	if (!lun_acl) {
+		kref_put(&se_deve->pr_kref, target_pr_kref_release);
+		return;
+	}
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	target_undepend_item(&lun_acl->se_lun_group.cg_item);
+	kref_put(&se_deve->pr_kref, target_pr_kref_release);
+}
+
+static sense_reason_t
+core_scsi3_decode_spec_i_port(
+	struct se_cmd *cmd,
+	struct se_portal_group *tpg,
+	unsigned char *l_isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_node_acl *dest_node_acl = NULL;
+	struct se_dev_entry *dest_se_deve = NULL;
+	struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	LIST_HEAD(tid_dest_list);
+	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
+	unsigned char *buf, *ptr, proto_ident;
+	const unsigned char *i_str = NULL;
+	char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
+	sense_reason_t ret;
+	u32 tpdl, tid_len = 0;
+	u32 dest_rtpi = 0;
+
+	/*
+	 * Allocate a struct pr_transport_id_holder and setup the
+	 * local_node_acl pointer and add to struct list_head tid_dest_list
+	 * for add registration processing in the loop of tid_dest_list below.
+	 */
+	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
+	if (!tidh_new) {
+		pr_err("Unable to allocate tidh_new\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	INIT_LIST_HEAD(&tidh_new->dest_list);
+	tidh_new->dest_tpg = tpg;
+	tidh_new->dest_node_acl = se_sess->se_node_acl;
+
+	local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
+				se_sess->se_node_acl, cmd->se_lun,
+				NULL, cmd->orig_fe_lun, l_isid,
+				sa_res_key, all_tg_pt, aptpl);
+	if (!local_pr_reg) {
+		kfree(tidh_new);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	tidh_new->dest_pr_reg = local_pr_reg;
+	/*
+	 * The local I_T nexus does not hold any configfs dependencies,
+	 * so we set tidh_new->dest_se_deve to NULL to prevent the
+	 * configfs_undepend_item() calls in the tid_dest_list loops below.
+	 */
+	tidh_new->dest_se_deve = NULL;
+	list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+	if (cmd->data_length < 28) {
+		pr_warn("SPC-PR: Received PR OUT parameter list"
+			" length too small: %u\n", cmd->data_length);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf) {
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto out;
+	}
+
+	/*
+	 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
+	 * first extract TransportID Parameter Data Length, and make sure
+	 * the value matches up to the SCSI expected data transfer length.
+	 */
+	tpdl = (buf[24] & 0xff) << 24;
+	tpdl |= (buf[25] & 0xff) << 16;
+	tpdl |= (buf[26] & 0xff) << 8;
+	tpdl |= buf[27] & 0xff;
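+	/*
+	 * Note: the shift-and-or sequence above assembles the big-endian
+	 * TRANSPORTID PARAMETER DATA LENGTH field; it is equivalent to
+	 * get_unaligned_be32(&buf[24]) from <asm/unaligned.h>, which is
+	 * already included by this file.
+	 */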
+
+	if ((tpdl + 28) != cmd->data_length) {
+		pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+			" does not equal CDB data_length: %u\n", tpdl,
+			cmd->data_length);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out_unmap;
+	}
+	/*
+	 * Start processing the received transport IDs using the
+	 * receiving I_T Nexus portal's fabric dependent methods to
+	 * obtain the SCSI Initiator Port/Device Identifiers.
+	 */
+	ptr = &buf[28];
+
+	while (tpdl > 0) {
+		struct se_lun *dest_lun, *tmp_lun;
+
+		proto_ident = (ptr[0] & 0x0f);
+		dest_tpg = NULL;
+
+		spin_lock(&dev->se_port_lock);
+		list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+			tmp_tpg = tmp_lun->lun_tpg;
+
+			/*
+			 * Look for the matching proto_ident provided by
+			 * the received TransportID
+			 */
+			if (tmp_tpg->proto_id != proto_ident)
+				continue;
+			dest_rtpi = tmp_lun->lun_rtpi;
+
+			i_str = target_parse_pr_out_transport_id(tmp_tpg,
+					(const char *)ptr, &tid_len, &iport_ptr);
+			if (!i_str)
+				continue;
+
+			atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count);
+			spin_unlock(&dev->se_port_lock);
+
+			if (core_scsi3_tpg_depend_item(tmp_tpg)) {
+				pr_err(" core_scsi3_tpg_depend_item()"
+					" for tmp_tpg\n");
+				atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count);
+				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				goto out_unmap;
+			}
+			/*
+			 * Locate the destination initiator ACL to be registered
+			 * from the decoded fabric module specific TransportID
+			 * at *i_str.
+			 */
+			mutex_lock(&tmp_tpg->acl_node_mutex);
+			dest_node_acl = __core_tpg_get_initiator_node_acl(
+						tmp_tpg, i_str);
+			if (dest_node_acl)
+				atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
+			mutex_unlock(&tmp_tpg->acl_node_mutex);
+
+			if (!dest_node_acl) {
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				spin_lock(&dev->se_port_lock);
+				continue;
+			}
+
+			if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
+				pr_err("configfs_depend_item() failed"
+					" for dest_node_acl->acl_group\n");
+				atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				goto out_unmap;
+			}
+
+			dest_tpg = tmp_tpg;
+			pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
+				" %s Port RTPI: %hu\n",
+				dest_tpg->se_tpg_tfo->get_fabric_name(),
+				dest_node_acl->initiatorname, dest_rtpi);
+
+			spin_lock(&dev->se_port_lock);
+			break;
+		}
+		spin_unlock(&dev->se_port_lock);
+
+		if (!dest_tpg) {
+			pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
+					" dest_tpg\n");
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto out_unmap;
+		}
+
+		pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+			" tid_len: %d for %s + %s\n",
+			dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
+			tpdl, tid_len, i_str, iport_ptr);
+
+		if (tid_len > tpdl) {
+			pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+				" %u for Transport ID: %s\n", tid_len, ptr);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto out_unmap;
+		}
+		/*
+		 * Locate the destination struct se_dev_entry pointer for matching
+		 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
+		 * Target Port.
+		 */
+		dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+					dest_rtpi);
+		if (!dest_se_deve) {
+			pr_err("Unable to locate %s dest_se_deve"
+				" from destination RTPI: %hu\n",
+				dest_tpg->se_tpg_tfo->get_fabric_name(),
+				dest_rtpi);
+
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto out_unmap;
+		}
+
+		if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
+			pr_err("core_scsi3_lunacl_depend_item()"
+					" failed\n");
+			kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			goto out_unmap;
+		}
+
+		pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+			" dest_se_deve mapped_lun: %llu\n",
+			dest_tpg->se_tpg_tfo->get_fabric_name(),
+			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+
+		/*
+		 * Skip any TransportIDs that already have a registration for
+		 * this target port.
+		 */
+		pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+		if (pr_reg_e) {
+			core_scsi3_put_pr_reg(pr_reg_e);
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ptr += tid_len;
+			tpdl -= tid_len;
+			tid_len = 0;
+			continue;
+		}
+		/*
+		 * Allocate a struct pr_transport_id_holder and setup
+		 * the dest_node_acl and dest_se_deve pointers for the
+		 * loop below.
+		 */
+		tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+				GFP_KERNEL);
+		if (!tidh_new) {
+			pr_err("Unable to allocate tidh_new\n");
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			goto out_unmap;
+		}
+		INIT_LIST_HEAD(&tidh_new->dest_list);
+		tidh_new->dest_tpg = dest_tpg;
+		tidh_new->dest_node_acl = dest_node_acl;
+		tidh_new->dest_se_deve = dest_se_deve;
+
+		/*
+		 * Allocate, but do NOT add the registration for the
+		 * TransportID referenced SCSI Initiator port.  This is
+		 * done because of the following from spc4r17 in section
+		 * 6.14.3 wrt SPEC_I_PT:
+		 *
+		 * "If a registration fails for any initiator port (e.g., if the
+		 * logical unit does not have enough resources available to
+		 * hold the registration information), no registrations shall be
+		 * made, and the command shall be terminated with
+		 * CHECK CONDITION status."
+		 *
+		 * That means we call __core_scsi3_alloc_registration() here,
+		 * and then call __core_scsi3_add_registration() in the
+		 * 2nd loop which will never fail.
+		 */
+		dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
+				atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+
+		dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
+					dest_node_acl, dest_lun, dest_se_deve,
+					dest_se_deve->mapped_lun, iport_ptr,
+					sa_res_key, all_tg_pt, aptpl);
+		if (!dest_pr_reg) {
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			kfree(tidh_new);
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto out_unmap;
+		}
+		tidh_new->dest_pr_reg = dest_pr_reg;
+		list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+		ptr += tid_len;
+		tpdl -= tid_len;
+		tid_len = 0;
+
+	}
+
+	transport_kunmap_data_sg(cmd);
+
+	/*
+	 * Go ahead and create registrations from tid_dest_list for the
+	 * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
+	 * and dest_se_deve.
+	 *
+	 * The SA Reservation Key from the PROUT is set for the
+	 * registration, and ALL_TG_PT is also passed.  ALL_TG_PT=1
+	 * means that the TransportID Initiator port will be
+	 * registered on all of the target ports in the SCSI target device.
+	 * ALL_TG_PT=0 means the registration will only be for the
+	 * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1
+	 * was received.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		core_pr_dump_initiator_port(dest_pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+
+		__core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
+					dest_pr_reg, 0, 0);
+
+		pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
+			" registered Transport ID for Node: %s%s Mapped LUN:"
+			" %llu\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
+			dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
+			dest_se_deve->mapped_lun : 0);
+
+		if (!dest_se_deve) {
+			kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+				 target_pr_kref_release);
+			continue;
+		}
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+
+	return 0;
+out_unmap:
+	transport_kunmap_data_sg(cmd);
+out:
+	/*
+	 * For the failure case, release everything from tid_dest_list
+	 * including *dest_pr_reg and the configfs dependencies..
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+		/*
+		 * Release any extra ALL_TG_PT=1 registrations for
+		 * the SPEC_I_PT=1 case.
+		 */
+		list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+				&dest_pr_reg->pr_reg_atp_list,
+				pr_reg_atp_mem_list) {
+			list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+			core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+			kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+		}
+
+		kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
+
+		if (!dest_se_deve) {
+			kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+				 target_pr_kref_release);
+			continue;
+		}
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+	return ret;
+}
+
+static int core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len)
+{
+	struct se_portal_group *tpg;
+	struct t10_pr_registration *pr_reg;
+	unsigned char tmp[512], isid_buf[32];
+	ssize_t len = 0;
+	int reg_count = 0;
+	int ret = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	spin_lock(&dev->t10_pr.registration_lock);
+	/*
+	 * Walk the registration list..
+	 */
+	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
+			pr_reg_list) {
+
+		tmp[0] = '\0';
+		isid_buf[0] = '\0';
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		/*
+		 * Write out any ISID value to APTPL metadata that was included
+		 * in the original registration.
+		 */
+		if (pr_reg->isid_present_at_reg)
+			snprintf(isid_buf, 32, "initiator_sid=%s\n",
+					pr_reg->pr_reg_isid);
+		/*
+		 * Include special metadata if the pr_reg matches the
+		 * reservation holder.
+		 */
+		if (dev->dev_pr_res_holder == pr_reg) {
+			snprintf(tmp, 512, "PR_REG_START: %d"
+				"\ninitiator_fabric=%s\n"
+				"initiator_node=%s\n%s"
+				"sa_res_key=%llu\n"
+				"res_holder=1\nres_type=%02x\n"
+				"res_scope=%02x\nres_all_tg_pt=%d\n"
+				"mapped_lun=%llu\n", reg_count,
+				tpg->se_tpg_tfo->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_res_type,
+				pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		} else {
+			snprintf(tmp, 512, "PR_REG_START: %d\n"
+				"initiator_fabric=%s\ninitiator_node=%s\n%s"
+				"sa_res_key=%llu\nres_holder=0\n"
+				"res_all_tg_pt=%d\nmapped_lun=%llu\n",
+				reg_count, tpg->se_tpg_tfo->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		}
+
+		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+			pr_err("Unable to update renaming APTPL metadata,"
+			       " reallocating larger buffer\n");
+			ret = -EMSGSIZE;
+			goto out;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+
+		/*
+		 * Include information about the associated SCSI target port.
+		 */
+		snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
+			"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:"
+			" %d\n", tpg->se_tpg_tfo->get_fabric_name(),
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
+			pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun,
+			reg_count);
+
+		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+			pr_err("Unable to update renaming APTPL metadata,"
+			       " reallocating larger buffer\n");
+			ret = -EMSGSIZE;
+			goto out;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+		reg_count++;
+	}
+
+	if (!reg_count)
+		len += sprintf(buf+len, "No Registrations or Reservations");
+
+out:
+	spin_unlock(&dev->t10_pr.registration_lock);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
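+
+/*
+ * Illustrative APTPL record as emitted by core_scsi3_update_aptpl_buf()
+ * above (all values hypothetical):
+ *
+ *	PR_REG_START: 0
+ *	initiator_fabric=iSCSI
+ *	initiator_node=iqn.1994-05.com.example:initiator
+ *	initiator_sid=00023d000001
+ *	sa_res_key=12345
+ *	res_holder=0
+ *	res_all_tg_pt=0
+ *	mapped_lun=0
+ *	target_fabric=iSCSI
+ *	target_node=iqn.2003-01.org.example:target
+ *	tpgt=1
+ *	port_rtpi=1
+ *	target_lun=0
+ *	PR_REG_END: 0
+ */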
+
+static int __core_scsi3_write_aptpl_to_file(
+	struct se_device *dev,
+	unsigned char *buf)
+{
+	struct t10_wwn *wwn = &dev->t10_wwn;
+	struct file *file;
+	int flags = O_RDWR | O_CREAT | O_TRUNC;
+	char path[512];
+	u32 pr_aptpl_buf_len;
+	int ret;
+
+	memset(path, 0, 512);
+
+	if (strlen(&wwn->unit_serial[0]) >= 512) {
+		pr_err("WWN value for struct se_device does not fit"
+			" into path buffer\n");
+		return -EMSGSIZE;
+	}
+
+	snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file)) {
+		pr_err("filp_open(%s) for APTPL metadata"
+			" failed\n", path);
+		return PTR_ERR(file);
+	}
+
+	pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NUL terminator */
+
+	ret = kernel_write(file, buf, pr_aptpl_buf_len, 0);
+
+	if (ret < 0)
+		pr_debug("Error writing APTPL metadata file: %s\n", path);
+	fput(file);
+
+	return (ret < 0) ? -EIO : 0;
+}
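+
+/*
+ * The metadata path is derived from the device's T10 WWN unit serial;
+ * e.g. a backend with unit_serial "4a862596-f463-4dd1" (an illustrative
+ * value only) is persisted to /var/target/pr/aptpl_4a862596-f463-4dd1.
+ */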
+
+/*
+ * Clear the APTPL metadata if APTPL has been disabled, otherwise
+ * write out the updated metadata to struct file for this SCSI device.
+ */
+static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
+{
+	unsigned char *buf;
+	int rc, len = PR_APTPL_BUF_LEN;
+
+	if (!aptpl) {
+		char *null_buf = "No Registrations or Reservations\n";
+
+		rc = __core_scsi3_write_aptpl_to_file(dev, null_buf);
+		dev->t10_pr.pr_aptpl_active = 0;
+		pr_debug("SPC-3 PR: Set APTPL Bit Deactivated\n");
+
+		if (rc)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+		return 0;
+	}
+retry:
+	buf = vzalloc(len);
+	if (!buf)
+		return TCM_OUT_OF_RESOURCES;
+
+	rc = core_scsi3_update_aptpl_buf(dev, buf, len);
+	if (rc < 0) {
+		vfree(buf);
+		len *= 2;
+		goto retry;
+	}
+
+	rc = __core_scsi3_write_aptpl_to_file(dev, buf);
+	if (rc != 0) {
+		pr_err("SPC-3 PR: Could not update APTPL\n");
+		vfree(buf);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	dev->t10_pr.pr_aptpl_active = 1;
+	vfree(buf);
+	pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
+	return 0;
+}
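+
+/*
+ * Note: the retry loop above grows the scratch buffer geometrically
+ * (PR_APTPL_BUF_LEN, then 2x, 4x, ...) until core_scsi3_update_aptpl_buf()
+ * stops returning -EMSGSIZE, so arbitrarily large registration lists can
+ * be serialized without precomputing their size.
+ */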
+
+static sense_reason_t
+core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+		bool aptpl, bool all_tg_pt, bool spec_i_pt, enum register_type register_type)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_device *dev = cmd->se_dev;
+	struct se_lun *se_lun = cmd->se_lun;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+	sense_reason_t ret = TCM_NO_SENSE;
+	int pr_holder = 0, type;
+
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	se_tpg = se_sess->se_tpg;
+
+	if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
+		memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
+		se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
+				PR_REG_ISID_LEN);
+		isid_ptr = &isid_buf[0];
+	}
+	/*
+	 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!pr_reg) {
+		if (res_key) {
+			pr_warn("SPC-3 PR: Reservation Key non-zero"
+				" for SA REGISTER, returning CONFLICT\n");
+			return TCM_RESERVATION_CONFLICT;
+		}
+		/*
+		 * Do nothing but return GOOD status.
+		 */
+		if (!sa_res_key)
+			return 0;
+
+		if (!spec_i_pt) {
+			/*
+			 * Perform the Service Action REGISTER on the Initiator
+			 * Port Endpoint that the PRO was received from on the
+			 * Logical Unit of the SCSI device server.
+			 */
+			if (core_scsi3_alloc_registration(cmd->se_dev,
+					se_sess->se_node_acl, cmd->se_lun,
+					NULL, cmd->orig_fe_lun, isid_ptr,
+					sa_res_key, all_tg_pt, aptpl,
+					register_type, 0)) {
+				pr_err("Unable to allocate"
+					" struct t10_pr_registration\n");
+				return TCM_INVALID_PARAMETER_LIST;
+			}
+		} else {
+			/*
+			 * Register the Initiator port that received
+			 * PROUT SA REGISTER + SPEC_I_PT=1, then extract each
+			 * SCSI TransportID from the fabric dependent
+			 * parameter list and call
+			 * core_scsi3_alloc_registration() for each
+			 * TransportID provided SCSI Initiator Port/Device.
+			 */
+			ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
+					isid_ptr, sa_res_key, all_tg_pt, aptpl);
+			if (ret != 0)
+				return ret;
+		}
+		return core_scsi3_update_and_write_aptpl(dev, aptpl);
+	}
+
+	/* ok, existing registration */
+
+	if ((register_type == REGISTER) && (res_key != pr_reg->pr_res_key)) {
+		pr_err("SPC-3 PR REGISTER: Received"
+		       " res_key: 0x%016Lx does not match"
+		       " existing SA REGISTER res_key:"
+		       " 0x%016Lx\n", res_key,
+		       pr_reg->pr_res_key);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out;
+	}
+
+	if (spec_i_pt) {
+		pr_err("SPC-3 PR REGISTER: SPEC_I_PT"
+			" set on a registered nexus\n");
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	/*
+	 * An existing ALL_TG_PT=1 registration being released
+	 * must also set ALL_TG_PT=1 in the incoming PROUT.
+	 */
+	if (pr_reg->pr_reg_all_tg_pt && !all_tg_pt) {
+		pr_err("SPC-3 PR REGISTER: ALL_TG_PT=1"
+			" registration exists, but ALL_TG_PT=1 bit not"
+			" present in received PROUT\n");
+		ret = TCM_INVALID_CDB_FIELD;
+		goto out;
+	}
+
+	/*
+	 * Non-zero sa_res_key: Change Reservation Key for registered I_T Nexus.
+	 */
+	if (sa_res_key) {
+		/*
+		 * Increment PRgeneration counter for struct se_device
+		 * upon a successful REGISTER, see spc4r17 section 6.3.2
+		 * READ_KEYS service action.
+		 */
+		pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev);
+		pr_reg->pr_res_key = sa_res_key;
+		pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+			 " Key for %s to: 0x%016Lx PRgeneration:"
+			 " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
+			 (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "",
+			 pr_reg->pr_reg_nacl->initiatorname,
+			 pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+	} else {
+		/*
+		 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
+		 */
+		type = pr_reg->pr_res_type;
+		pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
+							      pr_reg);
+		if (pr_holder < 0) {
+			ret = TCM_RESERVATION_CONFLICT;
+			goto out;
+		}
+
+		spin_lock(&pr_tmpl->registration_lock);
+		/*
+		 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+		 * and matching pr_res_key.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt) {
+			list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+					&pr_tmpl->registration_list,
+					pr_reg_list) {
+
+				if (!pr_reg_p->pr_reg_all_tg_pt)
+					continue;
+				if (pr_reg_p->pr_res_key != res_key)
+					continue;
+				if (pr_reg == pr_reg_p)
+					continue;
+				if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+					   pr_reg_p->pr_reg_nacl->initiatorname))
+					continue;
+
+				__core_scsi3_free_registration(dev,
+						pr_reg_p, NULL, 0);
+			}
+		}
+
+		/*
+		 * Release the calling I_T Nexus registration now..
+		 */
+		__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+		pr_reg = NULL;
+
+		/*
+		 * From spc4r17, section 5.7.11.3 Unregistering
+		 *
+		 * If the persistent reservation is a registrants only
+		 * type, the device server shall establish a unit
+		 * attention condition for the initiator port associated
+		 * with every registered I_T nexus except for the I_T
+		 * nexus on which the PERSISTENT RESERVE OUT command was
+		 * received, with the additional sense code set to
+		 * RESERVATIONS RELEASED.
+		 */
+		if (pr_holder &&
+		    (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+		     type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+			list_for_each_entry(pr_reg_p,
+					&pr_tmpl->registration_list,
+					pr_reg_list) {
+
+				target_ua_allocate_lun(
+					pr_reg_p->pr_reg_nacl,
+					pr_reg_p->pr_res_mapped_lun,
+					0x2A,
+					ASCQ_2AH_RESERVATIONS_RELEASED);
+			}
+		}
+
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+
+	ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
+
+out:
+	if (pr_reg)
+		core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
+unsigned char *core_scsi3_pr_dump_type(int type)
+{
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		return "Write Exclusive Access";
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		return "Exclusive Access";
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		return "Write Exclusive Access, Registrants Only";
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		return "Exclusive Access, Registrants Only";
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		return "Write Exclusive Access, All Registrants";
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		return "Exclusive Access, All Registrants";
+	default:
+		break;
+	}
+
+	return "Unknown SPC-3 PR Type";
+}
+
+static sense_reason_t
+core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_lun *se_lun = cmd->se_lun;
+	struct t10_pr_registration *pr_reg, *pr_res_holder;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	sense_reason_t ret;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+				se_sess);
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RESERVE\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * An application client creates a persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RESERVE service action through
+	 * a registered I_T nexus with the following parameters:
+	 *    a) RESERVATION KEY set to the value of the reservation key that is
+	 * 	 registered with the logical unit for the I_T nexus; and
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * From above:
+	 *  b) TYPE field and SCOPE field set to the persistent reservation
+	 *     being created.
+	 *
+	 * Only one persistent reservation is allowed at a time per logical unit
+	 * and that persistent reservation has a scope of LU_SCOPE.
+	 */
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * See if we have an existing PR reservation holder pointer at
+	 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
+	 * *pr_res_holder.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder) {
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command from an I_T nexus other than a persistent reservation
+		 * holder (see 5.7.10) that attempts to create a persistent
+		 * reservation when a persistent reservation already exists for
+		 * the logical unit, then the command shall be completed with
+		 * RESERVATION CONFLICT status.
+		 */
+		if (!is_reservation_holder(pr_res_holder, pr_reg)) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			pr_err("SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s while reservation already held by"
+				" [%s]: %s, returning RESERVATION_CONFLICT\n",
+				cmd->se_tfo->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			ret = TCM_RESERVATION_CONFLICT;
+			goto out_put_pr_reg;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If a persistent reservation holder attempts to modify the
+		 * type or scope of an existing persistent reservation, the
+		 * command shall be completed with RESERVATION CONFLICT status.
+		 */
+		if ((pr_res_holder->pr_res_type != type) ||
+		    (pr_res_holder->pr_res_scope != scope)) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			pr_err("SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s trying to change TYPE and/or SCOPE,"
+				" while reservation already held by [%s]: %s,"
+				" returning RESERVATION_CONFLICT\n",
+				cmd->se_tfo->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			ret = TCM_RESERVATION_CONFLICT;
+			goto out_put_pr_reg;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command with RESERVE service action where the TYPE field and
+		 * the SCOPE field contain the same values as the existing type
+		 * and scope from a persistent reservation holder, it shall not
+		 * make any change to the existing persistent reservation and
+		 * shall complete the command with GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = 0;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * Otherwise, our *pr_reg becomes the PR reservation holder for said
+	 * TYPE/SCOPE.  Also set the received scope and type in *pr_reg.
+	 */
+	pr_reg->pr_res_scope = scope;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_holder = 1;
+	dev->dev_pr_res_holder = pr_reg;
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+
+	pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+			cmd->se_tfo->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			i_buf);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if (pr_tmpl->pr_aptpl_active)
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+
+	ret = 0;
+out_put_pr_reg:
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
+static sense_reason_t
+core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
+		u64 res_key)
+{
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		return core_scsi3_pro_reserve(cmd, type, scope, res_key);
+	default:
+		pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
+			" 0x%02x\n", type);
+		return TCM_INVALID_CDB_FIELD;
+	}
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_release(
+	struct se_device *dev,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int explicit,
+	int unreg)
+{
+	const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int pr_res_type = 0, pr_res_scope = 0;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+	/*
+	 * Go ahead and release the current PR reservation holder.
+	 * If an All Registrants reservation is currently active and
+	 * an unregister operation is requested, replace the current
+	 * dev_pr_res_holder with another active registration.
+	 */
+	if (dev->dev_pr_res_holder) {
+		pr_res_type = dev->dev_pr_res_holder->pr_res_type;
+		pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
+		dev->dev_pr_res_holder->pr_res_type = 0;
+		dev->dev_pr_res_holder->pr_res_scope = 0;
+		dev->dev_pr_res_holder->pr_res_holder = 0;
+		dev->dev_pr_res_holder = NULL;
+	}
+	if (!unreg)
+		goto out;
+
+	spin_lock(&dev->t10_pr.registration_lock);
+	list_del_init(&pr_reg->pr_reg_list);
+	/*
+	 * If the I_T nexus is a reservation holder, the persistent reservation
+	 * is of an all registrants type, and the I_T nexus is the last remaining
+	 * registered I_T nexus, then the device server shall also release the
+	 * persistent reservation.
+	 */
+	if (!list_empty(&dev->t10_pr.registration_list) &&
+	    ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	     (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
+		dev->dev_pr_res_holder =
+			list_entry(dev->t10_pr.registration_list.next,
+				   struct t10_pr_registration, pr_reg_list);
+		dev->dev_pr_res_holder->pr_res_type = pr_res_type;
+		dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
+		dev->dev_pr_res_holder->pr_res_holder = 1;
+	}
+	spin_unlock(&dev->t10_pr.registration_lock);
+out:
+	if (!dev->dev_pr_res_holder) {
+		pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+			" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+			tfo->get_fabric_name(), (explicit) ? "explicit" :
+			"implicit", core_scsi3_pr_dump_type(pr_res_type),
+			(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	}
+	pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
+		tfo->get_fabric_name(), se_nacl->initiatorname,
+		i_buf);
+	/*
+	 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
+	 */
+	pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
+}
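+
+/*
+ * Example of the all registrants hand-off above (names illustrative):
+ * with registrations A, B and C on the list and A acting as
+ * dev_pr_res_holder for a Write Exclusive, All Registrants reservation,
+ * unregistering A promotes the first remaining entry (B) to
+ * dev_pr_res_holder with the saved pr_res_type and pr_res_scope intact.
+ */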
+
+static sense_reason_t
+core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
+		u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_lun *se_lun = cmd->se_lun;
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	sense_reason_t ret = 0;
+
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RELEASE\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * If there is no persistent reservation or in response to a persistent
+	 * reservation release request from a registered I_T nexus that is not a
+	 * persistent reservation holder (see 5.7.10), the device server shall
+	 * do the following:
+	 *
+	 *     a) Not release the persistent reservation, if any;
+	 *     b) Not remove any registrations; and
+	 *     c) Complete the command with GOOD status.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!pr_res_holder) {
+		/*
+		 * No persistent reservation, return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		goto out_put_pr_reg;
+	}
+
+	if (!is_reservation_holder(pr_res_holder, pr_reg)) {
+		/*
+		 * Release request from a registered I_T nexus that is not a
+		 * persistent reservation holder; return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		goto out_put_pr_reg;
+	}
+
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * Only the persistent reservation holder (see 5.7.10) is allowed to
+	 * release a persistent reservation.
+	 *
+	 * An application client releases the persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RELEASE service action through
+	 * an I_T nexus that is a persistent reservation holder with the
+	 * following parameters:
+	 *
+	 *     a) RESERVATION KEY field set to the value of the reservation key
+	 *	  that is registered with the logical unit for the I_T nexus;
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing and above:
+	 *
+	 * b) TYPE field and SCOPE field set to match the persistent
+	 *    reservation being released.
+	 */
+	if ((pr_res_holder->pr_res_type != type) ||
+	    (pr_res_holder->pr_res_scope != scope)) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		pr_err("SPC-3 PR RELEASE: Attempted to release"
+			" reservation from [%s]: %s with different TYPE "
+			"and/or SCOPE  while reservation already held by"
+			" [%s]: %s, returning RESERVATION_CONFLICT\n",
+			cmd->se_tfo->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+			pr_res_holder->pr_reg_nacl->initiatorname);
+
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * In response to a persistent reservation release request from the
+	 * persistent reservation holder the device server shall perform a
+	 * release by doing the following as an uninterrupted series of actions:
+	 * a) Release the persistent reservation;
+	 * b) Not remove any registration(s);
+	 * c) If the released persistent reservation is a registrants only type
+	 *    or all registrants type persistent reservation, the device server
+	 *    shall establish a unit attention condition for the initiator port
+	 *    associated with every registered I_T nexus other than the I_T
+	 *    nexus on which the PERSISTENT RESERVE OUT command with RELEASE
+	 *    service action was received, with the additional sense code set
+	 *    to RESERVATIONS RELEASED; and
+	 * d) If the persistent reservation is of any other type, the device
+	 *    server shall not establish a unit attention condition.
+	 */
+	__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+					  pr_reg, 1, 0);
+
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
+	    (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		/*
+		 * If no UNIT ATTENTION conditions will be established for
+		 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS
+		 * go ahead and check for APTPL=1 update+write below
+		 */
+		goto write_aptpl;
+	}
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
+			pr_reg_list) {
+		/*
+		 * Do not establish a UNIT ATTENTION condition
+		 * for the calling I_T Nexus
+		 */
+		if (pr_reg_p == pr_reg)
+			continue;
+
+		target_ua_allocate_lun(pr_reg_p->pr_reg_nacl,
+				pr_reg_p->pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+write_aptpl:
+	if (pr_tmpl->pr_aptpl_active)
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+
+out_put_pr_reg:
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
+static sense_reason_t
+core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = cmd->se_sess;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	u64 pr_res_mapped_lun = 0;
+	int calling_it_nexus = 0;
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
+			se_sess->se_node_acl, se_sess);
+	if (!pr_reg_n) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for CLEAR\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * From spc4r17 section 5.7.11.6, Clearing:
+	 *
+	 * Any application client may release the persistent reservation and
+	 * remove all registrations from a device server by issuing a
+	 * PERSISTENT RESERVE OUT command with CLEAR service action through a
+	 * registered I_T nexus with the following parameter:
+	 *
+	 *	a) RESERVATION KEY field set to the value of the reservation key
+	 * 	   that is registered with the logical unit for the I_T nexus.
+	 */
+	if (res_key != pr_reg_n->pr_res_key) {
+		pr_err("SPC-3 PR REGISTER: Received"
+			" res_key: 0x%016Lx does not match"
+			" existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return TCM_RESERVATION_CONFLICT;
+	}
+	/*
+	 * a) Release the persistent reservation, if any;
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+						  pr_res_holder, 0, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * b) Remove all registration(s) (see spc4r17 5.7.7);
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg, NULL,
+					calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every registered I_T nexus other
+		 *    than the I_T nexus on which the PERSISTENT RESERVE OUT
+		 *    command with CLEAR service action was received, with the
+		 *    additional sense code set to RESERVATIONS PREEMPTED.
+		 */
+		if (!calling_it_nexus)
+			target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
+		cmd->se_tfo->get_fabric_name());
+
+	core_scsi3_update_and_write_aptpl(cmd->se_dev, false);
+
+	core_scsi3_pr_generation(dev);
+	return 0;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_preempt(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int type,
+	int scope,
+	enum preempt_type preempt_type)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+	/*
+	 * Do an implicit RELEASE of the existing reservation.
+	 */
+	if (dev->dev_pr_res_holder)
+		__core_scsi3_complete_pro_release(dev, nacl,
+						  dev->dev_pr_res_holder, 0, 0);
+
+	dev->dev_pr_res_holder = pr_reg;
+	pr_reg->pr_res_holder = 1;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_scope = scope;
+
+	pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
+		core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+		tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
+		nacl->initiatorname, i_buf);
+	/*
+	 * For PREEMPT_AND_ABORT, add the preempting reservation's
+	 * struct t10_pr_registration to the list that will be compared
+	 * against received CDBs..
+	 */
+	if (preempt_and_abort_list)
+		list_add_tail(&pr_reg->pr_reg_abort_list,
+				preempt_and_abort_list);
+}
+
+static void core_scsi3_release_preempt_and_abort(
+	struct list_head *preempt_and_abort_list,
+	struct t10_pr_registration *pr_reg_holder)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+				pr_reg_abort_list) {
+
+		list_del(&pr_reg->pr_reg_abort_list);
+		if (pr_reg_holder == pr_reg)
+			continue;
+		if (pr_reg->pr_res_holder) {
+			pr_warn("pr_reg->pr_res_holder still set\n");
+			continue;
+		}
+
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+}
+
+static sense_reason_t
+core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
+		u64 sa_res_key, enum preempt_type preempt_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = cmd->se_sess;
+	LIST_HEAD(preempt_and_abort_list);
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	u64 pr_res_mapped_lun = 0;
+	int all_reg = 0, calling_it_nexus = 0;
+	bool sa_res_key_unmatched = sa_res_key != 0;
+	int prh_type = 0, prh_scope = 0;
+
+	if (!se_sess)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+				se_sess);
+	if (!pr_reg_n) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for PREEMPT%s\n",
+			(preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "");
+		return TCM_RESERVATION_CONFLICT;
+	}
+	if (pr_reg_n->pr_res_key != res_key) {
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return TCM_RESERVATION_CONFLICT;
+	}
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return TCM_INVALID_PARAMETER_LIST;
+	}
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder &&
+	   ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
+		all_reg = 1;
+
+	if (!all_reg && !sa_res_key) {
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return TCM_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
+	 *
+	 * If the SERVICE ACTION RESERVATION KEY field does not identify a
+	 * persistent reservation holder or there is no persistent reservation
+	 * holder (i.e., there is no persistent reservation), then the device
+	 * server shall perform a preempt by doing the following in an
+	 * uninterrupted series of actions. (See below..)
+	 */
+	if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) {
+		/*
+		 * No existing or SA Reservation Key matching reservations..
+		 *
+		 * PROUT SA PREEMPT with All Registrant type reservations are
+		 * allowed to be processed without a matching SA Reservation Key
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+			/*
+			 * Removing of registrations in non all registrants
+			 * type reservations without a matching SA reservation
+			 * key.
+			 *
+			 * a) Remove the registrations for all I_T nexuses
+			 *    specified by the SERVICE ACTION RESERVATION KEY
+			 *    field;
+			 * b) Ignore the contents of the SCOPE and TYPE fields;
+			 * c) Process tasks as defined in 5.7.1; and
+			 * d) Establish a unit attention condition for the
+			 *    initiator port associated with every I_T nexus
+			 *    that lost its registration other than the I_T
+			 *    nexus on which the PERSISTENT RESERVE OUT command
+			 *    was received, with the additional sense code set
+			 *    to REGISTRATIONS PREEMPTED.
+			 */
+			if (!all_reg) {
+				if (pr_reg->pr_res_key != sa_res_key)
+					continue;
+				sa_res_key_unmatched = false;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
+						NULL, calling_it_nexus);
+			} else {
+				/*
+				 * Case for any existing all registrants type
+				 * reservation, follow logic in spc4r17 section
+				 * 5.7.11.4 Preempting, Table 52 and Figure 7.
+				 *
+				 * For a ZERO SA Reservation key, release
+				 * all other registrations and do an implicit
+				 * release of active persistent reservation.
+				 *
+				 * For a non-ZERO SA Reservation key, only
+				 * release the matching reservation key from
+				 * registrations.
+				 */
+				if ((sa_res_key) &&
+				     (pr_reg->pr_res_key != sa_res_key))
+					continue;
+				sa_res_key_unmatched = false;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				if (calling_it_nexus)
+					continue;
+
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
+						NULL, 0);
+			}
+			if (!calling_it_nexus)
+				target_ua_allocate_lun(pr_reg_nacl,
+					pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_REGISTRATIONS_PREEMPTED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
+		 * a PREEMPT AND ABORT service action sets the SERVICE ACTION
+		 * RESERVATION KEY field to a value that does not match any
+		 * registered reservation key, then the device server shall
+		 * complete the command with RESERVATION CONFLICT status.
+		 */
+		if (sa_res_key_unmatched) {
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg_n);
+			return TCM_RESERVATION_CONFLICT;
+		}
+		/*
+		 * For an existing all registrants type reservation
+		 * with a zero SA reservation key, preempt the existing
+		 * reservation with the new PR type and scope.
+		 */
+		if (pr_res_holder && all_reg && !(sa_res_key)) {
+			__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+				(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
+				type, scope, preempt_type);
+
+			if (preempt_type == PREEMPT_AND_ABORT)
+				core_scsi3_release_preempt_and_abort(
+					&preempt_and_abort_list, pr_reg_n);
+		}
+		spin_unlock(&dev->dev_reservation_lock);
+
+		if (pr_tmpl->pr_aptpl_active)
+			core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+
+		core_scsi3_put_pr_reg(pr_reg_n);
+		core_scsi3_pr_generation(cmd->se_dev);
+		return 0;
+	}
+	/*
+	 * The PREEMPTing SA reservation key matches that of the
+	 * existing persistent reservation, first, we check if
+	 * we are preempting our own reservation.
+	 * From spc4r17, section 5.7.11.4.3 Preempting
+	 * persistent reservations and registration handling
+	 *
+	 * If an all registrants persistent reservation is not
+	 * present, it is not an error for the persistent
+	 * reservation holder to preempt itself (i.e., a
+	 * PERSISTENT RESERVE OUT with a PREEMPT service action
+	 * or a PREEMPT AND ABORT service action with the
+	 * SERVICE ACTION RESERVATION KEY value equal to the
+	 * persistent reservation holder's reservation key that
+	 * is received from the persistent reservation holder).
+	 * In that case, the device server shall establish the
+	 * new persistent reservation and maintain the
+	 * registration.
+	 */
+	prh_type = pr_res_holder->pr_res_type;
+	prh_scope = pr_res_holder->pr_res_scope;
+	/*
+	 * If the SERVICE ACTION RESERVATION KEY field identifies a
+	 * persistent reservation holder (see 5.7.10), the device
+	 * server shall perform a preempt by doing the following as
+	 * an uninterrupted series of actions:
+	 *
+	 * a) Release the persistent reservation for the holder
+	 *    identified by the SERVICE ACTION RESERVATION KEY field;
+	 */
+	if (pr_reg_n != pr_res_holder)
+		__core_scsi3_complete_pro_release(dev,
+						  pr_res_holder->pr_reg_nacl,
+						  dev->dev_pr_res_holder, 0, 0);
+	/*
+	 * b) Remove the registrations for all I_T nexuses identified
+	 *    by the SERVICE ACTION RESERVATION KEY field, except the
+	 *    I_T nexus that is being used for the PERSISTENT RESERVE
+	 *    OUT command. If an all registrants persistent reservation
+	 *    is present and the SERVICE ACTION RESERVATION KEY field
+	 *    is set to zero, then all registrations shall be removed
+	 *    except for that of the I_T nexus that is being used for
+	 *    the PERSISTENT RESERVE OUT command;
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		if (calling_it_nexus)
+			continue;
+
+		if (pr_reg->pr_res_key != sa_res_key)
+			continue;
+
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg,
+				(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
+				calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every I_T nexus that lost its
+		 *    persistent reservation and/or registration, with the
+		 *    additional sense code set to REGISTRATIONS PREEMPTED;
+		 */
+		target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+				ASCQ_2AH_REGISTRATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * c) Establish a persistent reservation for the preempting
+	 *    I_T nexus using the contents of the SCOPE and TYPE fields;
+	 */
+	__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+			(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
+			type, scope, preempt_type);
+	/*
+	 * d) Process tasks as defined in 5.7.1;
+	 * e) See above..
+	 * f) If the type or scope has changed, then for every I_T nexus
+	 *    whose reservation key was not removed, except for the I_T
+	 *    nexus on which the PERSISTENT RESERVE OUT command was
+	 *    received, the device server shall establish a unit
+	 *    attention condition for the initiator port associated with
+	 *    that I_T nexus, with the additional sense code set to
+	 *    RESERVATIONS RELEASED. If the type or scope have not
+	 *    changed, then no unit attention condition(s) shall be
+	 *    established for this reason.
+	 */
+	if ((prh_type != type) || (prh_scope != scope)) {
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+
+			calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+			if (calling_it_nexus)
+				continue;
+
+			target_ua_allocate_lun(pr_reg->pr_reg_nacl,
+					pr_reg->pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_RESERVATIONS_RELEASED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Call LUN_RESET logic upon the list of struct t10_pr_registration;
+	 * all received CDBs for the matching existing reservation and
+	 * registrations undergo ABORT_TASK logic.
+	 *
+	 * From there, core_scsi3_release_preempt_and_abort() will
+	 * release every registration in the list (which have already
+	 * been removed from the primary pr_reg list), except the
+	 * new persistent reservation holder, the calling Initiator Port.
+	 */
+	if (preempt_type == PREEMPT_AND_ABORT) {
+		core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
+		core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
+						pr_reg_n);
+	}
+
+	if (pr_tmpl->pr_aptpl_active)
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+
+	core_scsi3_put_pr_reg(pr_reg_n);
+	core_scsi3_pr_generation(cmd->se_dev);
+	return 0;
+}
+
+static sense_reason_t
+core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
+		u64 res_key, u64 sa_res_key, enum preempt_type preempt_type)
+{
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		return core_scsi3_pro_preempt(cmd, type, scope, res_key,
+					      sa_res_key, preempt_type);
+	default:
+		pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
+			" Type: 0x%02x\n", (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", type);
+		return TCM_INVALID_CDB_FIELD;
+	}
+}
+
+
+static sense_reason_t
+core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
+		u64 sa_res_key, int aptpl, int unreg)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_device *dev = cmd->se_dev;
+	struct se_dev_entry *dest_se_deve = NULL;
+	struct se_lun *se_lun = cmd->se_lun, *tmp_lun;
+	struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
+	struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
+	const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	unsigned char *buf;
+	const unsigned char *initiator_str;
+	char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
+	u32 tid_len, tmp_tid_len;
+	int new_reg = 0, type, scope, matching_iname;
+	sense_reason_t ret;
+	unsigned short rtpi;
+	unsigned char proto_ident;
+
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	se_tpg = se_sess->se_tpg;
+	tf_ops = se_tpg->se_tpg_tfo;
+	/*
+	 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
+	 *	Register behaviors for a REGISTER AND MOVE service action
+	 *
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+				se_sess);
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
+			" *pr_reg for REGISTER_AND_MOVE\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * The provided reservation key must match the existing reservation key
+	 * provided during this initiator's I_T nexus registration.
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
+			" res_key: 0x%016Lx does not match existing SA REGISTER"
+			" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * The service action reservation key needs to be non-zero.
+	 */
+	if (!sa_res_key) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
+			" sa_res_key\n");
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out_put_pr_reg;
+	}
+
+	/*
+	 * Determine the Relative Target Port Identifier where the reservation
+	 * will be moved to for the TransportID containing SCSI initiator WWN
+	 * information.
+	 */
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf) {
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto out_put_pr_reg;
+	}
+
+	rtpi = (buf[18] & 0xff) << 8;
+	rtpi |= buf[19] & 0xff;
+	tid_len = (buf[20] & 0xff) << 24;
+	tid_len |= (buf[21] & 0xff) << 16;
+	tid_len |= (buf[22] & 0xff) << 8;
+	tid_len |= buf[23] & 0xff;
+	transport_kunmap_data_sg(cmd);
+	buf = NULL;
+
+	if ((tid_len + 24) != cmd->data_length) {
+		pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+			" does not equal CDB data_length: %u\n", tid_len,
+			cmd->data_length);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out_put_pr_reg;
+	}
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+		if (tmp_lun->lun_rtpi != rtpi)
+			continue;
+		dest_se_tpg = tmp_lun->lun_tpg;
+		dest_tf_ops = dest_se_tpg->se_tpg_tfo;
+		if (!dest_tf_ops)
+			continue;
+
+		atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count);
+		spin_unlock(&dev->se_port_lock);
+
+		if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
+			pr_err("core_scsi3_tpg_depend_item() failed"
+				" for dest_se_tpg\n");
+			atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count);
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			goto out_put_pr_reg;
+		}
+
+		spin_lock(&dev->se_port_lock);
+		break;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	if (!dest_se_tpg || !dest_tf_ops) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" fabric ops from Relative Target Port Identifier:"
+			" %hu\n", rtpi);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out_put_pr_reg;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf) {
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto out_put_pr_reg;
+	}
+	proto_ident = (buf[24] & 0x0f);
+
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+			" 0x%02x\n", proto_ident);
+
+	if (proto_ident != dest_se_tpg->proto_id) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
+			" proto_ident: 0x%02x does not match ident: 0x%02x"
+			" from fabric: %s\n", proto_ident,
+			dest_se_tpg->proto_id,
+			dest_tf_ops->get_fabric_name());
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	initiator_str = target_parse_pr_out_transport_id(dest_se_tpg,
+			(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
+	if (!initiator_str) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" initiator_str from Transport ID\n");
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	transport_kunmap_data_sg(cmd);
+	buf = NULL;
+
+	pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+		" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+		"port" : "device", initiator_str, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action specifies a TransportID that is the same as the initiator port
+	 * of the I_T nexus for the command received, then the command shall
+	 * be terminated with CHECK CONDITION status, with the sense key set to
+	 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
+	 * IN PARAMETER LIST.
+	 */
+	pr_reg_nacl = pr_reg->pr_reg_nacl;
+	matching_iname = (!strcmp(initiator_str,
+				  pr_reg_nacl->initiatorname)) ? 1 : 0;
+	if (!matching_iname)
+		goto after_iport_check;
+
+	if (!iport_ptr || !pr_reg->isid_present_at_reg) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+			" matches: %s on received I_T Nexus\n", initiator_str,
+			pr_reg_nacl->initiatorname);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+			" matches: %s %s on received I_T Nexus\n",
+			initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+			pr_reg->pr_reg_isid);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+after_iport_check:
+	/*
+	 * Locate the destination struct se_node_acl from the received Transport ID
+	 */
+	mutex_lock(&dest_se_tpg->acl_node_mutex);
+	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+				initiator_str);
+	if (dest_node_acl)
+		atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
+	mutex_unlock(&dest_se_tpg->acl_node_mutex);
+
+	if (!dest_node_acl) {
+		pr_err("Unable to locate %s dest_node_acl for"
+			" TransportID%s\n", dest_tf_ops->get_fabric_name(),
+			initiator_str);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
+		pr_err("core_scsi3_nodeacl_depend_item() for"
+			" dest_node_acl\n");
+		atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
+		dest_node_acl = NULL;
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+		" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname);
+
+	/*
+	 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+	 * PORT IDENTIFIER.
+	 */
+	dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+	if (!dest_se_deve) {
+		pr_err("Unable to locate %s dest_se_deve from RTPI:"
+			" %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
+		pr_err("core_scsi3_lunacl_depend_item() failed\n");
+		kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
+		dest_se_deve = NULL;
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto out;
+	}
+
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+		" ACL for dest_se_deve->mapped_lun: %llu\n",
+		dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+		dest_se_deve->mapped_lun);
+
+	/*
+	 * A persistent reservation needs to already exist in order to
+	 * successfully complete the REGISTER_AND_MOVE service action..
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!pr_res_holder) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
+			" currently held\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = TCM_INVALID_CDB_FIELD;
+		goto out;
+	}
+	/*
+	 * The I_T Nexus the command was received on must be the
+	 * reservation holder.
+	 *
+	 * From spc4r17 section 5.7.8  Table 50 --
+	 * 	Register behaviors for a REGISTER AND MOVE service action
+	 */
+	if (!is_reservation_holder(pr_res_holder, pr_reg)) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+			" Nexus is not reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out;
+	}
+	/*
+	 * From spc4r17 section 5.7.8: registering and moving reservation
+	 *
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action is received and the established persistent reservation is a
+	 * Write Exclusive - All Registrants type or Exclusive Access -
+	 * All Registrants type reservation, then the command shall be completed
+	 * with RESERVATION CONFLICT status.
+	 */
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+			" reservation for type: %s\n",
+			core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out;
+	}
+	pr_res_nacl = pr_res_holder->pr_reg_nacl;
+	/*
+	 * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+	 */
+	type = pr_res_holder->pr_res_type;
+	scope = pr_res_holder->pr_res_scope;
+	/*
+	 * c) Associate the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field with the I_T nexus specified as the
+	 *    destination of the register and move, where:
+	 *    A) The I_T nexus is specified by the TransportID and the
+	 *	 RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+	 *    B) Regardless of the TransportID format used, the association for
+	 *       the initiator port is based on either the initiator port name
+	 *       (see 3.1.71) on SCSI transport protocols where port names are
+	 *       required or the initiator port identifier (see 3.1.70) on SCSI
+	 *       transport protocols where port names are not required;
+	 * d) Register the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field;
+	 * e) Retain the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field and associated information;
+	 *
+	 * Also, it is not an error for a REGISTER AND MOVE service action to
+	 * register an I_T nexus that is already registered with the same
+	 * reservation key or a different reservation key.
+	 */
+	dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+	if (!dest_pr_reg) {
+		struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
+				atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+
+		spin_unlock(&dev->dev_reservation_lock);
+		if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
+					dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
+					iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		spin_lock(&dev->dev_reservation_lock);
+		dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+						iport_ptr);
+		new_reg = 1;
+	}
+	/*
+	 * f) Release the persistent reservation for the persistent reservation
+	 *    holder (i.e., the I_T nexus on which the PERSISTENT RESERVE OUT
+	 *    command was received);
+	 */
+	__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+					  dev->dev_pr_res_holder, 0, 0);
+	/*
+	 * g) Move the persistent reservation to the specified I_T nexus using
+	 *    the same scope and type as the persistent reservation released in
+	 *    item f); and
+	 */
+	dev->dev_pr_res_holder = dest_pr_reg;
+	dest_pr_reg->pr_res_holder = 1;
+	dest_pr_reg->pr_res_type = type;
+	dest_pr_reg->pr_res_scope = scope;
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+	/*
+	 * Increment PRGeneration for existing registrations..
+	 */
+	if (!new_reg)
+		dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+		" created new reservation holder TYPE: %s on object RTPI:"
+		" %hu  PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+		core_scsi3_pr_dump_type(type), rtpi,
+		dest_pr_reg->pr_res_generation);
+	pr_debug("SPC-3 PR Successfully moved reservation from"
+		" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
+		tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
+		i_buf, dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * It is now safe to release configfs group dependencies for destination
+	 * of Transport ID Initiator Device/Port Identifier
+	 */
+	core_scsi3_lunacl_undepend_item(dest_se_deve);
+	core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	/*
+	 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
+	 * nexus on which PERSISTENT RESERVE OUT command was received.
+	 */
+	if (unreg) {
+		spin_lock(&pr_tmpl->registration_lock);
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 1);
+		spin_unlock(&pr_tmpl->registration_lock);
+	} else
+		core_scsi3_put_pr_reg(pr_reg);
+
+	core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
+
+	transport_kunmap_data_sg(cmd);
+
+	core_scsi3_put_pr_reg(dest_pr_reg);
+	return 0;
+out:
+	if (buf)
+		transport_kunmap_data_sg(cmd);
+	if (dest_se_deve)
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+	if (dest_node_acl)
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+
+out_put_pr_reg:
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
+static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
+	__v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
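+
+/*
+ * Worked example for the extraction above: for the big-endian parameter
+ * list bytes 00 01 02 03 04 05 06 07, __v1 = 0x00010203 and
+ * __v2 = 0x04050607, so the function returns 0x0001020304050607ULL.
+ */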
+
+/*
+ * See spc4r17 section 6.14 Table 170
+ */
+sense_reason_t
+target_scsi3_emulate_pr_out(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *cdb = &cmd->t_task_cdb[0];
+	unsigned char *buf;
+	u64 res_key, sa_res_key;
+	int sa, scope, type, aptpl;
+	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+	sense_reason_t ret;
+
+	/*
+	 * Following spc2r20 5.5.1 Reservations overview:
+	 *
+	 * If a logical unit has been reserved by any RESERVE command and is
+	 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+	 * PERSISTENT RESERVE OUT commands shall conflict regardless of
+	 * initiator or service action and shall terminate with a RESERVATION
+	 * CONFLICT status.
+	 */
+	if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
+		pr_err("Received PERSISTENT_RESERVE CDB while legacy"
+			" SPC-2 reservation is held, returning"
+			" RESERVATION_CONFLICT\n");
+		return TCM_RESERVATION_CONFLICT;
+	}
+
+	/*
+	 * FIXME: A NULL struct se_session pointer means this is not coming from
+	 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
+	 */
+	if (!cmd->se_sess)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	if (cmd->data_length < 24) {
+		pr_warn("SPC-PR: Received PR OUT parameter list"
+			" length too small: %u\n", cmd->data_length);
+		return TCM_INVALID_PARAMETER_LIST;
+	}
+
+	/*
+	 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+	 */
+	sa = (cdb[1] & 0x1f);
+	scope = (cdb[2] & 0xf0);
+	type = (cdb[2] & 0x0f);
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	/*
+	 * From PERSISTENT_RESERVE_OUT parameter list (payload)
+	 */
+	res_key = core_scsi3_extract_reservation_key(&buf[0]);
+	sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+	/*
+	 * REGISTER_AND_MOVE uses a different SA parameter list containing
+	 * SCSI TransportIDs.
+	 */
+	if (sa != PRO_REGISTER_AND_MOVE) {
+		spec_i_pt = (buf[20] & 0x08);
+		all_tg_pt = (buf[20] & 0x04);
+		aptpl = (buf[20] & 0x01);
+	} else {
+		aptpl = (buf[17] & 0x01);
+		unreg = (buf[17] & 0x02);
+	}
+	/*
+	 * If the backend device has been configured to force APTPL metadata
+	 * write-out, go ahead and propagate aptpl=1 down now.
+	 */
+	if (dev->dev_attrib.force_pr_aptpl)
+		aptpl = 1;
+
+	transport_kunmap_data_sg(cmd);
+	buf = NULL;
+
+	/*
+	 * SPEC_I_PT=1 is only valid for Service action: REGISTER
+	 */
+	if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+		return TCM_INVALID_PARAMETER_LIST;
+
+	/*
+	 * From spc4r17 section 6.14:
+	 *
+	 * If the SPEC_I_PT bit is set to zero, the service action is not
+	 * REGISTER AND MOVE, and the parameter list length is not 24, then
+	 * the command shall be terminated with CHECK CONDITION status, with
+	 * the sense key set to ILLEGAL REQUEST, and the additional sense
+	 * code set to PARAMETER LIST LENGTH ERROR.
+	 */
+	if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+	    (cmd->data_length != 24)) {
+		pr_warn("SPC-PR: Received PR OUT illegal parameter"
+			" list length: %u\n", cmd->data_length);
+		return TCM_INVALID_PARAMETER_LIST;
+	}
+
+	/*
+	 * The core_scsi3_emulate_pro_* function parameters
+	 * are defined by spc4r17 Table 174:
+	 * PERSISTENT_RESERVE_OUT service actions and valid parameters.
+	 */
+	switch (sa) {
+	case PRO_REGISTER:
+		ret = core_scsi3_emulate_pro_register(cmd,
+			res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER);
+		break;
+	case PRO_RESERVE:
+		ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
+		break;
+	case PRO_RELEASE:
+		ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key);
+		break;
+	case PRO_CLEAR:
+		ret = core_scsi3_emulate_pro_clear(cmd, res_key);
+		break;
+	case PRO_PREEMPT:
+		ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, PREEMPT);
+		break;
+	case PRO_PREEMPT_AND_ABORT:
+		ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, PREEMPT_AND_ABORT);
+		break;
+	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+		ret = core_scsi3_emulate_pro_register(cmd,
+			0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER_AND_IGNORE_EXISTING_KEY);
+		break;
+	case PRO_REGISTER_AND_MOVE:
+		ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+				sa_res_key, aptpl, unreg);
+		break;
+	default:
+		pr_err("Unknown PERSISTENT_RESERVE_OUT service"
+			" action: 0x%02x\n", cdb[1] & 0x1f);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
+	return ret;
+}
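+
+/*
+ * For reference, a minimal sketch of the basic 24-byte parameter list
+ * consumed above, as an initiator might build it for SA REGISTER;
+ * put_unaligned_be64() is assumed from <asm/unaligned.h>, and the
+ * transport used to deliver the CDB is elided.  SPEC_I_PT is omitted,
+ * since setting it requires additional TransportID parameter data
+ * beyond the basic 24 bytes:
+ *
+ *	unsigned char param[24] = { 0 };
+ *
+ *	put_unaligned_be64(res_key, &param[0]);	    // RESERVATION KEY
+ *	put_unaligned_be64(sa_res_key, &param[8]);  // SERVICE ACTION RES KEY
+ *	param[20] |= 0x04;			    // ALL_TG_PT
+ *	param[20] |= 0x01;			    // APTPL
+ */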
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_KEYS
+ *
+ * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
+ */
+static sense_reason_t
+core_scsi3_pri_read_keys(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf;
+	u32 add_len = 0, off = 8;
+
+	if (cmd->data_length < 8) {
+		pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+	buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+	buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+	buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+	spin_lock(&dev->t10_pr.registration_lock);
+	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
+			pr_reg_list) {
+		/*
+		 * Check for overflow of the 8-byte PRI READ_KEYS payload and
+		 * next reservation key list descriptor.
+		 */
+		if ((add_len + 8) > (cmd->data_length - 8))
+			break;
+
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+
+		add_len += 8;
+	}
+	spin_unlock(&dev->t10_pr.registration_lock);
+
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	transport_kunmap_data_sg(cmd);
+
+	return 0;
+}
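+
+/*
+ * A minimal sketch of decoding the READ_KEYS payload built above on the
+ * receiving side, assuming the get_unaligned_be*() helpers and a buffer
+ * of at least 8 + ADDITIONAL LENGTH bytes:
+ *
+ *	u32 pr_gen  = get_unaligned_be32(&buf[0]);	// PRGENERATION
+ *	u32 n_bytes = get_unaligned_be32(&buf[4]);	// ADDITIONAL LENGTH
+ *	u32 i;
+ *
+ *	for (i = 0; i < n_bytes / 8; i++) {
+ *		u64 key = get_unaligned_be64(&buf[8 + i * 8]);
+ *		// one registered reservation key per 8-byte descriptor
+ *	}
+ */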
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
+ *
+ * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
+ */
+static sense_reason_t
+core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf;
+	u64 pr_res_key;
+	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+
+	if (cmd->data_length < 8) {
+		pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+	buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+	buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+	buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (pr_reg) {
+		/*
+		 * Set the hardcoded Additional Length
+		 */
+		buf[4] = ((add_len >> 24) & 0xff);
+		buf[5] = ((add_len >> 16) & 0xff);
+		buf[6] = ((add_len >> 8) & 0xff);
+		buf[7] = (add_len & 0xff);
+
+		if (cmd->data_length < 22)
+			goto err;
+
+		/*
+		 * Set the Reservation key.
+		 *
+		 * From spc4r17, section 5.7.10:
+		 * A persistent reservation holder has its reservation key
+		 * returned in the parameter data from a PERSISTENT
+		 * RESERVE IN command with READ RESERVATION service action as
+		 * follows:
+		 * a) For a persistent reservation of the type Write Exclusive
+		 *    - All Registrants or Exclusive Access - All Registrants,
+		 *      the reservation key shall be set to zero; or
+		 * b) For all other persistent reservation types, the
+		 *    reservation key shall be set to the registered
+		 *    reservation key for the I_T nexus that holds the
+		 *    persistent reservation.
+		 */
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+			pr_res_key = 0;
+		else
+			pr_res_key = pr_reg->pr_res_key;
+
+		buf[8] = ((pr_res_key >> 56) & 0xff);
+		buf[9] = ((pr_res_key >> 48) & 0xff);
+		buf[10] = ((pr_res_key >> 40) & 0xff);
+		buf[11] = ((pr_res_key >> 32) & 0xff);
+		buf[12] = ((pr_res_key >> 24) & 0xff);
+		buf[13] = ((pr_res_key >> 16) & 0xff);
+		buf[14] = ((pr_res_key >> 8) & 0xff);
+		buf[15] = (pr_res_key & 0xff);
+		/*
+		 * Set the SCOPE and TYPE
+		 */
+		buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+			  (pr_reg->pr_res_type & 0x0f);
+	}
+
+err:
+	spin_unlock(&dev->dev_reservation_lock);
+	transport_kunmap_data_sg(cmd);
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static sense_reason_t
+core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	unsigned char *buf;
+	u16 add_len = 8; /* Hardcoded to 8. */
+
+	if (cmd->data_length < 6) {
+		pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+			" %u too small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	buf[0] = ((add_len >> 8) & 0xff);
+	buf[1] = (add_len & 0xff);
+	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+	buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+	buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+	buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+	/*
+	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+	 * set the TMV: Type Mask Valid bit.
+	 */
+	buf[3] |= 0x80;
+	/*
+	 * The ALLOW COMMANDS field may be changed to 0x20 or 0x40 later; see Table 166
+	 */
+	buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+	/*
+	 * PTPL_A: Persistence across Target Power Loss Active bit
+	 */
+	if (pr_tmpl->pr_aptpl_active)
+		buf[3] |= 0x01;
+	/*
+	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
+	 */
+	buf[4] |= 0x80; /* PR_TYPE_WRITE_EXCLUSIVE_ALLREG */
+	buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+	buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+	buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+	buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+	buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+	transport_kunmap_data_sg(cmd);
+
+	return 0;
+}
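+
+/*
+ * A minimal sketch of checking the capability bits reported above on the
+ * receiving side (same bit positions as the assignments in this function):
+ *
+ *	bool crh    = buf[2] & 0x10;	// Compatible Reservation Handling
+ *	bool sip_c  = buf[2] & 0x08;	// Specify Initiator Ports Capable
+ *	bool atp_c  = buf[2] & 0x04;	// All Target Ports Capable
+ *	bool ptpl_c = buf[2] & 0x01;	// Persistence across Target Power Loss
+ *	bool ptpl_a = buf[3] & 0x01;	// ... currently Active
+ */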
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
+ *
+ * See spc4r17 section 6.13.5 Table 168 and 169
+ */
+static sense_reason_t
+core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *se_nacl;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	unsigned char *buf;
+	u32 add_desc_len = 0, add_len = 0;
+	u32 off = 8; /* off into first Full Status descriptor */
+	int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+	int exp_desc_len, desc_len;
+	bool all_reg = false;
+
+	if (cmd->data_length < 8) {
+		pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+	buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+	buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+	buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_pr_res_holder) {
+		struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
+
+		if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+		    pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
+			all_reg = true;
+			pr_res_type = pr_holder->pr_res_type;
+			pr_res_scope = pr_holder->pr_res_scope;
+		}
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		se_nacl = pr_reg->pr_reg_nacl;
+		se_tpg = pr_reg->pr_reg_nacl->se_tpg;
+		add_desc_len = 0;
+
+		atomic_inc_mb(&pr_reg->pr_res_holders);
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Determine expected length of $FABRIC_MOD specific
+		 * TransportID full status descriptor.
+		 */
+		exp_desc_len = target_get_pr_transport_id_len(se_nacl, pr_reg,
+					&format_code);
+		if (exp_desc_len < 0 ||
+		    exp_desc_len + add_len > cmd->data_length) {
+			pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
+				" out of buffer: %d\n", cmd->data_length);
+			spin_lock(&pr_tmpl->registration_lock);
+			atomic_dec_mb(&pr_reg->pr_res_holders);
+			break;
+		}
+		/*
+		 * Set RESERVATION KEY
+		 */
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+		off += 4; /* Skip Over Reserved area */
+
+		/*
+		 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt)
+			buf[off] = 0x02;
+		/*
+		 * The struct se_lun pointer will be present for the
+		 * reservation holder for PR_HOLDER bit.
+		 *
+		 * Also, if this registration is the reservation
+		 * holder or there is an All Registrants reservation
+		 * active, fill in SCOPE and TYPE in the next byte.
+		 */
+		if (pr_reg->pr_res_holder) {
+			buf[off++] |= 0x01;
+			buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+				     (pr_reg->pr_res_type & 0x0f);
+		} else if (all_reg) {
+			buf[off++] |= 0x01;
+			buf[off++] = (pr_res_scope & 0xf0) |
+				     (pr_res_type & 0x0f);
+		} else {
+			off += 2;
+		}
+
+		off += 4; /* Skip over reserved area */
+		/*
+		 * From spc4r17 6.3.15:
+		 *
+		 * If the ALL_TG_PT bit is set to zero, the RELATIVE TARGET PORT
+		 * IDENTIFIER field contains the relative port identifier (see
+		 * 3.1.120) of the target port that is part of the I_T nexus
+		 * described by this full status descriptor. If the ALL_TG_PT
+		 * bit is set to one, the contents of the RELATIVE TARGET PORT
+		 * IDENTIFIER field are not defined by this standard.
+		 */
+		if (!pr_reg->pr_reg_all_tg_pt) {
+			u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
+
+			buf[off++] = ((sep_rtpi >> 8) & 0xff);
+			buf[off++] = (sep_rtpi & 0xff);
+		} else
+			off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
+
+		buf[off+4] = se_tpg->proto_id;
+
+		/*
+		 * Now, have the $FABRIC_MOD fill in the transport ID.
+		 */
+		desc_len = target_get_pr_transport_id(se_nacl, pr_reg,
+				&format_code, &buf[off+4]);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		atomic_dec_mb(&pr_reg->pr_res_holders);
+
+		if (desc_len < 0)
+			break;
+		/*
+		 * Set the ADDITIONAL DESCRIPTOR LENGTH
+		 */
+		buf[off++] = ((desc_len >> 24) & 0xff);
+		buf[off++] = ((desc_len >> 16) & 0xff);
+		buf[off++] = ((desc_len >> 8) & 0xff);
+		buf[off++] = (desc_len & 0xff);
+		/*
+		 * Size of the full status descriptor: the 24-byte header
+		 * plus the TransportID containing the $FABRIC_MOD specific
+		 * initiator device/port WWN information.
+		 *
+		 *  See spc4r17 Section 6.13.5 Table 169
+		 */
+		add_desc_len = (24 + desc_len);
+
+		off += desc_len;
+		add_len += add_desc_len;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Set ADDITIONAL_LENGTH
+	 */
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	transport_kunmap_data_sg(cmd);
+
+	return 0;
+}
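+
+/*
+ * For reference, each full status descriptor emitted above follows
+ * spc4r17 Table 169; offsets are relative to the descriptor start:
+ *
+ *	[0-7]	RESERVATION KEY
+ *	[8-11]	Reserved
+ *	[12]	flags: ALL_TG_PT (bit 1), PR_HOLDER (bit 0)
+ *	[13]	SCOPE (bits 7-4) | TYPE (bits 3-0)
+ *	[14-17]	Reserved
+ *	[18-19]	RELATIVE TARGET PORT IDENTIFIER
+ *	[20-23]	ADDITIONAL DESCRIPTOR LENGTH
+ *	[24-  ]	TransportID ($FABRIC_MOD specific)
+ */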
+
+sense_reason_t
+target_scsi3_emulate_pr_in(struct se_cmd *cmd)
+{
+	sense_reason_t ret;
+
+	/*
+	 * Following spc2r20 5.5.1 Reservations overview:
+	 *
+	 * If a logical unit has been reserved by any RESERVE command and is
+	 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+	 * PERSISTENT RESERVE OUT commands shall conflict regardless of
+	 * initiator or service action and shall terminate with a RESERVATION
+	 * CONFLICT status.
+	 */
+	if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
+		pr_err("Received PERSISTENT_RESERVE CDB while legacy"
+			" SPC-2 reservation is held, returning"
+			" RESERVATION_CONFLICT\n");
+		return TCM_RESERVATION_CONFLICT;
+	}
+
+	switch (cmd->t_task_cdb[1] & 0x1f) {
+	case PRI_READ_KEYS:
+		ret = core_scsi3_pri_read_keys(cmd);
+		break;
+	case PRI_READ_RESERVATION:
+		ret = core_scsi3_pri_read_reservation(cmd);
+		break;
+	case PRI_REPORT_CAPABILITIES:
+		ret = core_scsi3_pri_report_capabilities(cmd);
+		break;
+	case PRI_READ_FULL_STATUS:
+		ret = core_scsi3_pri_read_full_status(cmd);
+		break;
+	default:
+		pr_err("Unknown PERSISTENT_RESERVE_IN service"
+			" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
+	return ret;
+}
+
+sense_reason_t
+target_check_reservation(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	sense_reason_t ret;
+
+	if (!cmd->se_sess)
+		return 0;
+	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+		return 0;
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+		ret = target_scsi2_reservation_check(cmd);
+	else
+		ret = target_scsi3_pr_reservation_check(cmd);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
new file mode 100644
index 0000000..e3d26e9
--- /dev/null
+++ b/drivers/target/target_core_pr.h
@@ -0,0 +1,73 @@
+#ifndef TARGET_CORE_PR_H
+#define TARGET_CORE_PR_H
+/*
+ * PERSISTENT_RESERVE_OUT service action codes
+ *
+ * spc4r17 section 6.14.2 Table 171
+ */
+#define PRO_REGISTER				0x00
+#define PRO_RESERVE				0x01
+#define PRO_RELEASE				0x02
+#define PRO_CLEAR				0x03
+#define PRO_PREEMPT				0x04
+#define PRO_PREEMPT_AND_ABORT			0x05
+#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY	0x06
+#define PRO_REGISTER_AND_MOVE			0x07
+/*
+ * PERSISTENT_RESERVE_IN service action codes
+ *
+ * spc4r17 section 6.13.1 Table 159
+ */
+#define PRI_READ_KEYS				0x00
+#define PRI_READ_RESERVATION			0x01
+#define PRI_REPORT_CAPABILITIES			0x02
+#define PRI_READ_FULL_STATUS			0x03
+/*
+ * PERSISTENT_RESERVE_ SCOPE field
+ *
+ * spc4r17 section 6.13.3.3 Table 163
+ */
+#define PR_SCOPE_LU_SCOPE			0x00
+/*
+ * PERSISTENT_RESERVE_* TYPE field
+ *
+ * spc4r17 section 6.13.3.4 Table 164
+ */
+#define PR_TYPE_WRITE_EXCLUSIVE			0x01
+#define PR_TYPE_EXCLUSIVE_ACCESS		0x03
+#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY		0x05
+#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY	0x06
+#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG		0x07
+#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG		0x08
+
+#define PR_APTPL_MAX_IPORT_LEN			256
+#define PR_APTPL_MAX_TPORT_LEN			256
+
+/*
+ *  Function defined in target_core_spc.c
+ */
+void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+
+extern struct kmem_cache *t10_pr_reg_cache;
+
+extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
+			char *, u32);
+extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
+extern int core_scsi3_alloc_aptpl_registration(
+			struct t10_reservation *, u64,
+			unsigned char *, unsigned char *, u64,
+			unsigned char *, u16, u64, int, int, u8);
+extern int core_scsi3_check_aptpl_registration(struct se_device *,
+			struct se_portal_group *, struct se_lun *,
+			struct se_node_acl *, u64);
+extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+					     struct se_node_acl *);
+extern void core_scsi3_free_all_registrations(struct se_device *);
+extern unsigned char *core_scsi3_pr_dump_type(int);
+
+extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
+extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
+extern sense_reason_t target_check_reservation(struct se_cmd *);
+
+#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 0000000..d72a405
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1137 @@
+/*******************************************************************************
+ * Filename:  target_core_pscsi.c
+ *
+ * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
+ *
+ * (c) Copyright 2003-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/genhd.h>
+#include <linux/cdrom.h>
+#include <linux/ratelimit.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_alua.h"
+#include "target_core_internal.h"
+#include "target_core_pscsi.h"
+
+#define ISPRINT(a)  (((a) >= ' ') && ((a) <= '~'))
+
+static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct pscsi_dev_virt, dev);
+}
+
+static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
+static void pscsi_req_done(struct request *, int);
+
+/*	pscsi_attach_hba():
+ *
+ *	Record the passed SCSI Host ID; the struct Scsi_Host itself is
+ *	located later via scsi_host_lookup().
+ */
+static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct pscsi_hba_virt *phv;
+
+	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
+	if (!phv) {
+		pr_err("Unable to allocate struct pscsi_hba_virt\n");
+		return -ENOMEM;
+	}
+	phv->phv_host_id = host_id;
+	phv->phv_mode = PHV_VIRTUAL_HOST_ID;
+
+	hba->hba_ptr = phv;
+
+	pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		PSCSI_VERSION, TARGET_CORE_VERSION);
+	pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
+	       hba->hba_id);
+
+	return 0;
+}
+
+static void pscsi_detach_hba(struct se_hba *hba)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct Scsi_Host *scsi_host = phv->phv_lld_host;
+
+	if (scsi_host) {
+		scsi_host_put(scsi_host);
+
+		pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
+			" Generic Target Core\n", hba->hba_id,
+			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
+			"Unknown");
+	} else
+		pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
+			" from Generic Target Core\n", hba->hba_id);
+
+	kfree(phv);
+	hba->hba_ptr = NULL;
+}
+
+static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	/*
+	 * Release the struct Scsi_Host
+	 */
+	if (!mode_flag) {
+		if (!sh)
+			return 0;
+
+		phv->phv_lld_host = NULL;
+		phv->phv_mode = PHV_VIRTUAL_HOST_ID;
+
+		pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+			" %s\n", hba->hba_id, (sh->hostt->name) ?
+			(sh->hostt->name) : "Unknown");
+
+		scsi_host_put(sh);
+		return 0;
+	}
+	/*
+	 * Otherwise, locate struct Scsi_Host from the original passed
+	 * pSCSI Host ID and enable for phba mode
+	 */
+	sh = scsi_host_lookup(phv->phv_host_id);
+	if (!sh) {
+		pr_err("pSCSI: Unable to locate SCSI Host for"
+			" phv_host_id: %d\n", phv->phv_host_id);
+		return -EINVAL;
+	}
+
+	phv->phv_lld_host = sh;
+	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
+
+	pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
+
+	return 1;
+}
+
+static void pscsi_tape_read_blocksize(struct se_device *dev,
+		struct scsi_device *sdev)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(12, GFP_KERNEL);
+	if (!buf)
+		goto out_free;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = MODE_SENSE;
+	cdb[4] = 0x0c; /* 12 bytes */
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
+			HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
+	/*
+	 * If MODE_SENSE returned a block size of zero, the default of 1024 is set below.
+	 */
+	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
+	if (!sdev->sector_size)
+		sdev->sector_size = 1024;
+
+	kfree(buf);
+}
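+
+/*
+ * For reference, the MODE_SENSE(6) data parsed above, assuming a single
+ * 8-byte block descriptor follows the 4-byte mode parameter header:
+ *
+ *	[0-3]	mode parameter header
+ *	[4]	density code
+ *	[5-7]	number of blocks
+ *	[8]	reserved
+ *	[9-11]	block length -- the value copied into sdev->sector_size
+ */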
+
+static void
+pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char *buf;
+
+	if (sdev->inquiry_len < INQUIRY_LEN)
+		return;
+
+	buf = sdev->inquiry;
+	if (!buf)
+		return;
+	/*
+	 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+	 */
+	memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
+	memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
+	memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+}
+
+static int
+pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x80; /* Unit Serial Number */
+	cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
+	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
+
+	wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
+
+	kfree(buf);
+	return 0;
+
+out_free:
+	kfree(buf);
+	return -EPERM;
+}
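+
+/*
+ * For reference, the INQUIRY EVPD page 0x80 (Unit Serial Number) data
+ * consumed above:
+ *
+ *	[0]	peripheral qualifier / device type
+ *	[1]	page code (0x80)
+ *	[2-3]	page length
+ *	[4- ]	PRODUCT SERIAL NUMBER (ASCII), copied out via snprintf()
+ */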
+
+static void
+pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
+		struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
+	int ident_len, page_len, off = 4, ret;
+	struct t10_vpd *vpd;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x83; /* Device Identifier */
+	cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
+			      NULL, HZ, 1, NULL);
+	if (ret)
+		goto out;
+
+	page_len = (buf[2] << 8) | buf[3];
+	while (page_len > 0) {
+		/* Grab a pointer to the Identification descriptor */
+		page_83 = &buf[off];
+		ident_len = page_83[3];
+		if (!ident_len) {
+			pr_err("page_83[3]: identifier"
+					" length zero!\n");
+			break;
+		}
+		pr_debug("T10 VPD Identifier Length: %d\n", ident_len);
+
+		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
+		if (!vpd) {
+			pr_err("Unable to allocate memory for"
+					" struct t10_vpd\n");
+			goto out;
+		}
+		INIT_LIST_HEAD(&vpd->vpd_list);
+
+		transport_set_vpd_proto_id(vpd, page_83);
+		transport_set_vpd_assoc(vpd, page_83);
+
+		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+		if (transport_set_vpd_ident(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+
+		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
+		off += (ident_len + 4);
+		page_len -= (ident_len + 4);
+	}
+
+out:
+	kfree(buf);
+}
+
+static int pscsi_add_device_to_list(struct se_device *dev,
+		struct scsi_device *sd)
+{
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct request_queue *q = sd->request_queue;
+
+	pdv->pdv_sd = sd;
+
+	if (!sd->queue_depth) {
+		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
+
+		pr_err("Set broken SCSI Device %d:%d:%llu"
+			" queue_depth to %d\n", sd->channel, sd->id,
+				sd->lun, sd->queue_depth);
+	}
+
+	dev->dev_attrib.hw_block_size =
+		min_not_zero((int)sd->sector_size, 512);
+	dev->dev_attrib.hw_max_sectors =
+		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
+	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
+
+	/*
+	 * Setup our standard INQUIRY info into se_dev->t10_wwn
+	 */
+	pscsi_set_inquiry_info(sd, &dev->t10_wwn);
+
+	/*
+	 * Locate VPD WWN Information used for various purposes within
+	 * the Storage Engine.
+	 */
+	if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
+		/*
+		 * If VPD Unit Serial returned GOOD status, try
+		 * VPD Device Identification page (0x83).
+		 */
+		pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
+	}
+
+	/*
+	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+	 */
+	if (sd->type == TYPE_TAPE) {
+		pscsi_tape_read_blocksize(dev, sd);
+		dev->dev_attrib.hw_block_size = sd->sector_size;
+	}
+	return 0;
+}
+
+static struct se_device *pscsi_alloc_device(struct se_hba *hba,
+		const char *name)
+{
+	struct pscsi_dev_virt *pdv;
+
+	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
+	if (!pdv) {
+		pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
+		return NULL;
+	}
+
+	pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+	return &pdv->dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
+	__releases(sh->host_lock)
+{
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct Scsi_Host *sh = sd->host;
+	struct block_device *bd;
+	int ret;
+
+	if (scsi_device_get(sd)) {
+		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return -EIO;
+	}
+	spin_unlock_irq(sh->host_lock);
+	/*
+	 * Claim exclusive struct block_device access to struct scsi_device
+	 * for TYPE_DISK using supplied udev_path
+	 */
+	bd = blkdev_get_by_path(dev->udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
+	if (IS_ERR(bd)) {
+		pr_err("pSCSI: blkdev_get_by_path() failed\n");
+		scsi_device_put(sd);
+		return PTR_ERR(bd);
+	}
+	pdv->pdv_bd = bd;
+
+	ret = pscsi_add_device_to_list(dev, sd);
+	if (ret) {
+		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+		scsi_device_put(sd);
+		return ret;
+	}
+
+	pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%llu\n",
+		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+	return 0;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
+	__releases(sh->host_lock)
+{
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	int ret;
+
+	if (scsi_device_get(sd)) {
+		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return -EIO;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	ret = pscsi_add_device_to_list(dev, sd);
+	if (ret) {
+		scsi_device_put(sd);
+		return ret;
+	}
+	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return 0;
+}
+
+static int pscsi_configure_device(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct scsi_device *sd;
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	int legacy_mode_enable = 0;
+	int ret;
+
+	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
+			" scsi_lun_id= parameters\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
+	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
+	 */
+	if (!sh) {
+		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+			pr_err("pSCSI: Unable to locate struct"
+				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
+			return -ENODEV;
+		}
+		/*
+		 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
+		 * reference, we enforce that udev_path has been set
+		 */
+		if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
+			pr_err("pSCSI: udev_path attribute has not"
+				" been set before ENABLE=1\n");
+			return -EINVAL;
+		}
+		/*
+		 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
+		 * use the original TCM hba ID to reference Linux/SCSI Host No
+		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
+		 */
+		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
+			if (hba->dev_count) {
+				pr_err("pSCSI: Unable to set hba_mode"
+					" with active devices\n");
+				return -EEXIST;
+			}
+
+			if (pscsi_pmode_enable_hba(hba, 1) != 1)
+				return -ENODEV;
+
+			legacy_mode_enable = 1;
+			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+			sh = phv->phv_lld_host;
+		} else {
+			sh = scsi_host_lookup(pdv->pdv_host_id);
+			if (!sh) {
+				pr_err("pSCSI: Unable to locate"
+					" pdv_host_id: %d\n", pdv->pdv_host_id);
+				return -EINVAL;
+			}
+			pdv->pdv_lld_host = sh;
+		}
+	} else {
+		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
+			pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
+				" struct Scsi_Host exists\n");
+			return -EEXIST;
+		}
+	}
+
+	spin_lock_irq(sh->host_lock);
+	list_for_each_entry(sd, &sh->__devices, siblings) {
+		if ((pdv->pdv_channel_id != sd->channel) ||
+		    (pdv->pdv_target_id != sd->id) ||
+		    (pdv->pdv_lun_id != sd->lun))
+			continue;
+		/*
+		 * Functions will release the held struct scsi_host->host_lock
+		 * before calling pscsi_add_device_to_list() to register
+		 * struct scsi_device with target_core_mod.
+		 */
+		switch (sd->type) {
+		case TYPE_DISK:
+			ret = pscsi_create_type_disk(dev, sd);
+			break;
+		default:
+			ret = pscsi_create_type_nondisk(dev, sd);
+			break;
+		}
+
+		if (ret) {
+			if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
+				scsi_host_put(sh);
+			else if (legacy_mode_enable) {
+				pscsi_pmode_enable_hba(hba, 0);
+				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+			}
+			pdv->pdv_sd = NULL;
+			return ret;
+		}
+		return 0;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+		pdv->pdv_channel_id,  pdv->pdv_target_id, pdv->pdv_lun_id);
+
+	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
+		scsi_host_put(sh);
+	else if (legacy_mode_enable) {
+		pscsi_pmode_enable_hba(hba, 0);
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+	}
+
+	return -ENODEV;
+}
+
+static void pscsi_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+
+	kfree(pdv);
+}
+
+static void pscsi_free_device(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	if (sd) {
+		/*
+		 * Release exclusive pSCSI internal struct block_device claim for
+		 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+		 */
+		if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
+			blkdev_put(pdv->pdv_bd,
+				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+			pdv->pdv_bd = NULL;
+		}
+		/*
+		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
+		 * to struct Scsi_Host now.
+		 */
+		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+		    (phv->phv_lld_host != NULL))
+			scsi_host_put(phv->phv_lld_host);
+		else if (pdv->pdv_lld_host)
+			scsi_host_put(pdv->pdv_lld_host);
+
+		scsi_device_put(sd);
+
+		pdv->pdv_sd = NULL;
+	}
+	call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
+}
+
+static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
+				     unsigned char *sense_buffer)
+{
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
+	struct scsi_device *sd = pdv->pdv_sd;
+	int result;
+	struct pscsi_plugin_task *pt = cmd->priv;
+	unsigned char *cdb;
+	/*
+	 * Special case for REPORT_LUNs handling where pscsi_plugin_task has
+	 * not been allocated because TCM is handling the emulation directly.
+	 */
+	if (!pt)
+		return;
+
+	cdb = &pt->pscsi_cdb[0];
+	result = pt->pscsi_result;
+	/*
+	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
+	 * forced.
+	 */
+	if (!cmd->data_length)
+		goto after_mode_sense;
+
+	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
+	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		bool read_only = target_lun_is_rdonly(cmd);
+
+		if (read_only) {
+			unsigned char *buf;
+
+			buf = transport_kmap_data_sg(cmd);
+			if (!buf)
+				goto after_mode_sense; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
+
+			if (cdb[0] == MODE_SENSE_10) {
+				if (!(buf[3] & 0x80))
+					buf[3] |= 0x80;
+			} else {
+				if (!(buf[2] & 0x80))
+					buf[2] |= 0x80;
+			}
+
+			transport_kunmap_data_sg(cmd);
+		}
+	}
+after_mode_sense:
+
+	if (sd->type != TYPE_TAPE || !cmd->data_length)
+		goto after_mode_select;
+
+	/*
+	 * Hack to correctly obtain the initiator-requested blocksize for
+	 * TYPE_TAPE.  Since this value depends on the loaded tape media,
+	 * struct scsi_device->sector_size will not contain the correct value
+	 * by default, so set it here so the storage engine sees the correct
+	 * value.
+	 */
+	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
+	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		unsigned char *buf;
+		u16 bdl;
+		u32 blocksize;
+
+		buf = sg_virt(&sg[0]);
+		if (!buf) {
+			pr_err("Unable to get buf for scatterlist\n");
+			goto after_mode_select;
+		}
+
+		if (cdb[0] == MODE_SELECT)
+			bdl = (buf[3]);
+		else
+			bdl = (buf[6] << 8) | (buf[7]);
+
+		if (!bdl)
+			goto after_mode_select;
+
+		if (cdb[0] == MODE_SELECT)
+			blocksize = (buf[9] << 16) | (buf[10] << 8) |
+					(buf[11]);
+		else
+			blocksize = (buf[13] << 16) | (buf[14] << 8) |
+					(buf[15]);
+
+		sd->sector_size = blocksize;
+	}
+after_mode_select:
+
+	if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) {
+		memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER);
+		cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+	}
+}
+
+enum {
+	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
+	Opt_scsi_lun_id, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_scsi_host_id, "scsi_host_id=%d"},
+	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
+	{Opt_scsi_target_id, "scsi_target_id=%d"},
+	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
+{
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_scsi_host_id:
+			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+				pr_err("PSCSI[%d]: Unable to accept"
+					" scsi_host_id while phv_mode =="
+					" PHV_LLD_SCSI_HOST_NO\n",
+					phv->phv_host_id);
+				ret = -EINVAL;
+				goto out;
+			}
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			pdv->pdv_host_id = arg;
+			pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
+			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
+			break;
+		case Opt_scsi_channel_id:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			pdv->pdv_channel_id = arg;
+			pr_debug("PSCSI[%d]: Referencing SCSI Channel"
+				" ID: %d\n",  phv->phv_host_id,
+				pdv->pdv_channel_id);
+			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
+			break;
+		case Opt_scsi_target_id:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			pdv->pdv_target_id = arg;
+			pr_debug("PSCSI[%d]: Referencing SCSI Target"
+				" ID: %d\n", phv->phv_host_id,
+				pdv->pdv_target_id);
+			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
+			break;
+		case Opt_scsi_lun_id:
+			ret = match_int(args, &arg);
+			if (ret)
+				goto out;
+			pdv->pdv_lun_id = arg;
+			pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
+			pdv->pdv_flags |= PDF_HAS_LUN_ID;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
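+
+/*
+ * A hypothetical control string accepted by the parser above, as written
+ * through the device's configfs control attribute (tokens separated by
+ * ',' or '\n'; the numeric values are examples only):
+ *
+ *	scsi_host_id=2,scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0
+ */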
+
+static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
+{
+	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct scsi_device *sd = pdv->pdv_sd;
+	unsigned char host_id[16];
+	ssize_t bl;
+	int i;
+
+	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
+		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
+	else
+		snprintf(host_id, 16, "PHBA Mode");
+
+	bl = sprintf(b, "SCSI Device Bus Location:"
+		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
+		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
+		host_id);
+
+	if (sd) {
+		bl += sprintf(b + bl, "        ");
+		bl += sprintf(b + bl, "Vendor: ");
+		for (i = 0; i < 8; i++) {
+			if (ISPRINT(sd->vendor[i]))   /* printable character? */
+				bl += sprintf(b + bl, "%c", sd->vendor[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Model: ");
+		for (i = 0; i < 16; i++) {
+			if (ISPRINT(sd->model[i]))   /* printable character ? */
+				bl += sprintf(b + bl, "%c", sd->model[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Rev: ");
+		for (i = 0; i < 4; i++) {
+			if (ISPRINT(sd->rev[i]))   /* printable character ? */
+				bl += sprintf(b + bl, "%c", sd->rev[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	return bl;
+}
+
+static void pscsi_bi_endio(struct bio *bio)
+{
+	bio_put(bio);
+}
+
+static inline struct bio *pscsi_get_bio(int nr_vecs)
+{
+	struct bio *bio;
+	/*
+	 * Use bio_kmalloc(), following the comment on bio -> struct request
+	 * conversion in block/blk-core.c:blk_make_request()
+	 */
+	bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
+	if (!bio) {
+		pr_err("PSCSI: bio_kmalloc() failed\n");
+		return NULL;
+	}
+	bio->bi_end_io = pscsi_bi_endio;
+
+	return bio;
+}
+
+static sense_reason_t
+pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+		enum dma_data_direction data_direction, struct bio **hbio)
+{
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
+	struct bio *bio = NULL, *tbio = NULL;
+	struct page *page;
+	struct scatterlist *sg;
+	u32 data_len = cmd->data_length, i, len, bytes, off;
+	int nr_pages = (cmd->data_length + sgl[0].offset +
+			PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int nr_vecs = 0, rc;
+	int rw = (data_direction == DMA_TO_DEVICE);
+
+	*hbio = NULL;
+
+	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
+
+	for_each_sg(sgl, sg, sgl_nents, i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+			page, len, off);
+
+		/*
+		 * We only have one page of data in each sg element,
+		 * so we cannot cross a page boundary.
+		 */
+		if (off + len > PAGE_SIZE)
+			goto fail;
+
+		if (len > 0 && data_len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+			bytes = min(bytes, data_len);
+
+			if (!bio) {
+				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+				nr_pages -= nr_vecs;
+				/*
+				 * Calls bio_kmalloc() and sets bio->bi_end_io()
+				 */
+				bio = pscsi_get_bio(nr_vecs);
+				if (!bio)
+					goto fail;
+
+				if (rw)
+					bio->bi_rw |= REQ_WRITE;
+
+				pr_debug("PSCSI: Allocated bio: %p,"
+					" dir: %s nr_vecs: %d\n", bio,
+					(rw) ? "rw" : "r", nr_vecs);
+				/*
+				 * Set *hbio pointer to handle the case:
+				 * nr_pages > BIO_MAX_PAGES, where additional
+				 * bios need to be added to complete a given
+				 * command.
+				 */
+				if (!*hbio)
+					*hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
+				" bio: %p page: %p len: %d off: %d\n", i, bio,
+				page, len, off);
+
+			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
+					bio, page, bytes, off);
+			if (rc != bytes)
+				goto fail;
+
+			pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+				bio->bi_vcnt, nr_vecs);
+
+			if (bio->bi_vcnt > nr_vecs) {
+				pr_debug("PSCSI: Reached bio->bi_vcnt max:"
+					" %d i: %d bio: %p, allocating another"
+					" bio\n", bio->bi_vcnt, i, bio);
+				/*
+				 * Clear the pointer so that another bio will
+				 * be allocated with pscsi_get_bio() above; the
+				 * current bio has already been saved in *tbio
+				 * and linked via bio->bi_next.
+				 */
+				bio = NULL;
+			}
+
+			data_len -= bytes;
+		}
+	}
+
+	return 0;
+fail:
+	while (*hbio) {
+		bio = *hbio;
+		*hbio = (*hbio)->bi_next;
+		bio_endio(bio);
+	}
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
+static sense_reason_t
+pscsi_parse_cdb(struct se_cmd *cmd)
+{
+	if (cmd->se_cmd_flags & SCF_BIDI)
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+	return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
+}
+
+static sense_reason_t
+pscsi_execute_cmd(struct se_cmd *cmd)
+{
+	struct scatterlist *sgl = cmd->t_data_sg;
+	u32 sgl_nents = cmd->t_data_nents;
+	enum dma_data_direction data_direction = cmd->data_direction;
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
+	struct pscsi_plugin_task *pt;
+	struct request *req;
+	struct bio *hbio;
+	sense_reason_t ret;
+
+	/*
+	 * Dynamically alloc cdb space, since it may be larger than
+	 * TCM_MAX_COMMAND_SIZE
+	 */
+	pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
+	if (!pt) {
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	cmd->priv = pt;
+
+	memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
+		scsi_command_size(cmd->t_task_cdb));
+
+	if (!sgl) {
+		req = blk_get_request(pdv->pdv_sd->request_queue,
+				(data_direction == DMA_TO_DEVICE),
+				GFP_KERNEL);
+		if (IS_ERR(req)) {
+			pr_err("PSCSI: blk_get_request() failed\n");
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			goto fail;
+		}
+
+		blk_rq_set_block_pc(req);
+	} else {
+		BUG_ON(!cmd->data_length);
+
+		ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
+		if (ret)
+			goto fail;
+
+		req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
+				       GFP_KERNEL);
+		if (IS_ERR(req)) {
+			pr_err("pSCSI: blk_make_request() failed\n");
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			goto fail_free_bio;
+		}
+	}
+
+	req->end_io = pscsi_req_done;
+	req->end_io_data = cmd;
+	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+	req->cmd = &pt->pscsi_cdb[0];
+	req->sense = &pt->pscsi_sense[0];
+	req->sense_len = 0;
+	if (pdv->pdv_sd->type == TYPE_DISK)
+		req->timeout = PS_TIMEOUT_DISK;
+	else
+		req->timeout = PS_TIMEOUT_OTHER;
+	req->retries = PS_RETRY;
+
+	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
+			(cmd->sam_task_attr == TCM_HEAD_TAG),
+			pscsi_req_done);
+
+	return 0;
+
+fail_free_bio:
+	while (hbio) {
+		struct bio *bio = hbio;
+		hbio = hbio->bi_next;
+		bio_endio(bio);
+	}
+	ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail:
+	kfree(pt);
+	return ret;
+}
+
+/*	pscsi_get_device_type():
+ *
+ *
+ */
+static u32 pscsi_get_device_type(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	return (sd) ? sd->type : TYPE_NO_LUN;
+}
+
+static sector_t pscsi_get_blocks(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+
+	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+		return pdv->pdv_bd->bd_part->nr_sects;
+
+	return 0;
+}
+
+static void pscsi_req_done(struct request *req, int uptodate)
+{
+	struct se_cmd *cmd = req->end_io_data;
+	struct pscsi_plugin_task *pt = cmd->priv;
+
+	pt->pscsi_result = req->errors;
+	pt->pscsi_resid = req->resid_len;
+
+	cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
+	if (cmd->scsi_status) {
+		pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+	}
+
+	switch (host_byte(pt->pscsi_result)) {
+	case DID_OK:
+		target_complete_cmd(cmd, cmd->scsi_status);
+		break;
+	default:
+		pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+		break;
+	}
+
+	__blk_put_request(req->q, req);
+	kfree(pt);
+}
+
+static const struct target_backend_ops pscsi_ops = {
+	.name			= "pscsi",
+	.owner			= THIS_MODULE,
+	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
+	.attach_hba		= pscsi_attach_hba,
+	.detach_hba		= pscsi_detach_hba,
+	.pmode_enable_hba	= pscsi_pmode_enable_hba,
+	.alloc_device		= pscsi_alloc_device,
+	.configure_device	= pscsi_configure_device,
+	.free_device		= pscsi_free_device,
+	.transport_complete	= pscsi_transport_complete,
+	.parse_cdb		= pscsi_parse_cdb,
+	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
+	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
+	.get_device_type	= pscsi_get_device_type,
+	.get_blocks		= pscsi_get_blocks,
+	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
+};
+
+static int __init pscsi_module_init(void)
+{
+	return transport_backend_register(&pscsi_ops);
+}
+
+static void __exit pscsi_module_exit(void)
+{
+	target_backend_unregister(&pscsi_ops);
+}
+
+MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(pscsi_module_init);
+module_exit(pscsi_module_exit);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
new file mode 100644
index 0000000..6d2007e
--- /dev/null
+++ b/drivers/target/target_core_pscsi.h
@@ -0,0 +1,62 @@
+#ifndef TARGET_CORE_PSCSI_H
+#define TARGET_CORE_PSCSI_H
+
+#define PSCSI_VERSION		"v4.0"
+
+/* used in pscsi_find_alloc_len() */
+#ifndef INQUIRY_DATA_SIZE
+#define INQUIRY_DATA_SIZE	0x24
+#endif
+
+/* used in pscsi_add_device_to_list() */
+#define PSCSI_DEFAULT_QUEUEDEPTH	1
+
+#define PS_RETRY		5
+#define PS_TIMEOUT_DISK		(15*HZ)
+#define PS_TIMEOUT_OTHER	(500*HZ)
+
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/kobject.h>
+
+struct scsi_device;
+
+struct pscsi_plugin_task {
+	unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
+	int	pscsi_direction;
+	int	pscsi_result;
+	u32	pscsi_resid;
+	unsigned char pscsi_cdb[0];
+} ____cacheline_aligned;
+
+#define PDF_HAS_CHANNEL_ID	0x01
+#define PDF_HAS_TARGET_ID	0x02
+#define PDF_HAS_LUN_ID		0x04
+#define PDF_HAS_VPD_UNIT_SERIAL 0x08
+#define PDF_HAS_VPD_DEV_IDENT	0x10
+#define PDF_HAS_VIRT_HOST_ID	0x20
+
+struct pscsi_dev_virt {
+	struct se_device dev;
+	int	pdv_flags;
+	int	pdv_host_id;
+	int	pdv_channel_id;
+	int	pdv_target_id;
+	int	pdv_lun_id;
+	struct block_device *pdv_bd;
+	struct scsi_device *pdv_sd;
+	struct Scsi_Host *pdv_lld_host;
+} ____cacheline_aligned;
+
+typedef enum phv_modes {
+	PHV_VIRTUAL_HOST_ID,
+	PHV_LLD_SCSI_HOST_NO
+} phv_modes_t;
+
+struct pscsi_hba_virt {
+	int			phv_host_id;
+	phv_modes_t		phv_mode;
+	struct Scsi_Host	*phv_lld_host;
+} ____cacheline_aligned;
+
+#endif   /*** TARGET_CORE_PSCSI_H ***/
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 0000000..47a833f
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,689 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * (c) Copyright 2003-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi_proto.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_rd.h"
+
+static inline struct rd_dev *RD_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct rd_dev, dev);
+}
+
+static int rd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct rd_host *rd_host;
+
+	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
+	if (!rd_host) {
+		pr_err("Unable to allocate memory for struct rd_host\n");
+		return -ENOMEM;
+	}
+
+	rd_host->rd_host_id = host_id;
+
+	hba->hba_ptr = rd_host;
+
+	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		RD_HBA_VERSION, TARGET_CORE_VERSION);
+
+	return 0;
+}
+
+static void rd_detach_hba(struct se_hba *hba)
+{
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+	kfree(rd_host);
+	hba->hba_ptr = NULL;
+}
+
+static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+				 u32 sg_table_count)
+{
+	struct page *pg;
+	struct scatterlist *sg;
+	u32 i, j, page_count = 0, sg_per_table;
+
+	for (i = 0; i < sg_table_count; i++) {
+		sg = sg_table[i].sg_table;
+		sg_per_table = sg_table[i].rd_sg_count;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = sg_page(&sg[j]);
+			if (pg) {
+				__free_page(pg);
+				page_count++;
+			}
+		}
+		kfree(sg);
+	}
+
+	kfree(sg_table);
+	return page_count;
+}
+
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+	u32 page_count;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
+					  rd_dev->sg_table_count);
+
+	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
+		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	rd_dev->sg_table_array = NULL;
+	rd_dev->sg_table_count = 0;
+}
+
+
+/*	rd_allocate_sgl_table():
+ *
+ *
+ */
+static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+				 u32 total_sg_needed, unsigned char init_payload)
+{
+	u32 i = 0, j, page_offset = 0, sg_per_table;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	struct page *pg;
+	struct scatterlist *sg;
+	unsigned char *p;
+
+	while (total_sg_needed) {
+		unsigned int chain_entry = 0;
+
+		sg_per_table = (total_sg_needed > max_sg_per_table) ?
+			max_sg_per_table : total_sg_needed;
+
+		/*
+		 * Reserve extra element for chain entry
+		 */
+		if (sg_per_table < total_sg_needed)
+			chain_entry = 1;
+
+		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
+				GFP_KERNEL);
+		if (!sg) {
+			pr_err("Unable to allocate scatterlist array"
+				" for struct rd_dev\n");
+			return -ENOMEM;
+		}
+
+		sg_init_table(sg, sg_per_table + chain_entry);
+
+		if (i > 0) {
+			sg_chain(sg_table[i - 1].sg_table,
+				 max_sg_per_table + 1, sg);
+		}
+
+		sg_table[i].sg_table = sg;
+		sg_table[i].rd_sg_count = sg_per_table;
+		sg_table[i].page_start_offset = page_offset;
+		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+						- 1;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = alloc_pages(GFP_KERNEL, 0);
+			if (!pg) {
+				pr_err("Unable to allocate scatterlist"
+					" pages for struct rd_dev_sg_table\n");
+				return -ENOMEM;
+			}
+			sg_assign_page(&sg[j], pg);
+			sg[j].length = PAGE_SIZE;
+
+			p = kmap(pg);
+			memset(p, init_payload, PAGE_SIZE);
+			kunmap(pg);
+		}
+
+		page_offset += sg_per_table;
+		total_sg_needed -= sg_per_table;
+	}
+
+	return 0;
+}
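+
+/*
+ * Because consecutive tables are linked with sg_chain() above, the whole
+ * backing store can also be walked as one logical scatterlist; a minimal
+ * sketch, given a configured struct rd_dev *rd_dev and assuming
+ * <linux/scatterlist.h>:
+ *
+ *	struct scatterlist *sg;
+ *	void *p;
+ *
+ *	for (sg = rd_dev->sg_table_array[0].sg_table; sg; sg = sg_next(sg)) {
+ *		p = kmap(sg_page(sg));
+ *		// ... touch sg->length bytes at p ...
+ *		kunmap(sg_page(sg));
+ *	}
+ */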
+
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	int rc;
+
+	if (rd_dev->rd_page_count <= 0) {
+		pr_err("Illegal page count: %u for Ramdisk device\n",
+		       rd_dev->rd_page_count);
+		return -EINVAL;
+	}
+
+	/* Don't need backing pages for NULLIO */
+	if (rd_dev->rd_flags & RDF_NULLIO)
+		return 0;
+
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk"
+		       " scatterlist tables\n");
+		return -ENOMEM;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
+	if (rc)
+		return rc;
+
+	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		 rd_dev->sg_table_count);
+
+	return 0;
+}
+
+static void rd_release_prot_space(struct rd_dev *rd_dev)
+{
+	u32 page_count;
+
+	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
+		return;
+
+	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
+					  rd_dev->sg_prot_count);
+
+	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
+		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	rd_dev->sg_prot_array = NULL;
+	rd_dev->sg_prot_count = 0;
+}
+
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 total_sg_needed, sg_tables;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	int rc;
+
+	if (rd_dev->rd_flags & RDF_NULLIO)
+		return 0;
+	/*
+	 * prot_length is the 8-byte DIF tuple size per logical block.
+	 * Pages needed = rd_page_count * (PAGE_SIZE / block_size) *
+	 *		  (prot_length / PAGE_SIZE) + pad; the PAGE_SIZE
+	 * factors cancel, leaving rd_page_count * prot_length / block_size.
+	 */
+	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
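+	/*
+	 * Worked example (illustrative): rd_page_count=1024, block_size=512
+	 * and prot_length=8 give 1024 * 8 / 512 + 1 = 17 protection pages.
+	 */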
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk protection"
+		       " scatterlist tables\n");
+		return -ENOMEM;
+	}
+
+	rd_dev->sg_prot_array = sg_table;
+	rd_dev->sg_prot_count = sg_tables;
+
+	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
+	if (rc)
+		return rc;
+
+	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
+		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
+
+	return 0;
+}
+
+static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
+{
+	struct rd_dev *rd_dev;
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
+	if (!rd_dev) {
+		pr_err("Unable to allocate memory for struct rd_dev\n");
+		return NULL;
+	}
+
+	rd_dev->rd_host = rd_host;
+
+	return &rd_dev->dev;
+}
+
+static int rd_configure_device(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+	struct rd_host *rd_host = dev->se_hba->hba_ptr;
+	int ret;
+
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		pr_debug("Missing rd_pages= parameter\n");
+		return -EINVAL;
+	}
+
+	ret = rd_build_device_space(rd_dev);
+	if (ret < 0)
+		goto fail;
+
+	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
+	dev->dev_attrib.hw_max_sectors = UINT_MAX;
+	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+	dev->dev_attrib.is_nonrot = 1;
+
+	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+
+	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
+		" %u pages in %u tables, %lu total bytes\n",
+		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count,
+		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+	return 0;
+
+fail:
+	rd_release_device_space(rd_dev);
+	return ret;
+}
+
+static void rd_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+	kfree(rd_dev);
+}
+
+static void rd_free_device(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+	rd_release_device_space(rd_dev);
+	call_rcu(&dev->rcu_head, rd_dev_call_rcu);
+}
+
+static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+
+	i = page / sg_per_table;
+	if (i < rd_dev->sg_table_count) {
+		sg_table = &rd_dev->sg_table_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		    (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
+			page);
+
+	return NULL;
+}
+
+static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+
+	i = page / sg_per_table;
+	if (i < rd_dev->sg_prot_count) {
+		sg_table = &rd_dev->sg_prot_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		     (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
+			page);
+
+	return NULL;
+}
+
+static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct rd_dev *dev = RD_DEV(se_dev);
+	struct rd_dev_sg_table *prot_table;
+	bool need_to_release = false;
+	struct scatterlist *prot_sg;
+	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+	u32 prot_offset, prot_page;
+	u32 prot_npages __maybe_unused;
+	u64 tmp;
+	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	tmp = cmd->t_task_lba * se_dev->prot_length;
+	prot_offset = do_div(tmp, PAGE_SIZE);
+	prot_page = tmp;
+
+	prot_table = rd_get_prot_table(dev, prot_page);
+	if (!prot_table)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	prot_sg = &prot_table->sg_table[prot_page -
+					prot_table->page_start_offset];
+
+	if (is_read)
+		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+				    prot_sg, prot_offset);
+	else
+		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+				    cmd->t_prot_sg, 0);
+
+	if (!rc)
+		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
+
+	if (need_to_release)
+		kfree(prot_sg);
+
+	return rc;
+}
+
+static sense_reason_t
+rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+	      enum dma_data_direction data_direction)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct rd_dev *dev = RD_DEV(se_dev);
+	struct rd_dev_sg_table *table;
+	struct scatterlist *rd_sg;
+	struct sg_mapping_iter m;
+	u32 rd_offset;
+	u32 rd_size;
+	u32 rd_page;
+	u32 src_len;
+	u64 tmp;
+	sense_reason_t rc;
+
+	if (dev->rd_flags & RDF_NULLIO) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+
+	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+	rd_offset = do_div(tmp, PAGE_SIZE);
+	rd_page = tmp;
+	rd_size = cmd->data_length;
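+	/*
+	 * Example (illustrative): with block_size=512 and PAGE_SIZE=4096,
+	 * LBA 20 starts at byte 20 * 512 = 10240, i.e. rd_page=2 and
+	 * rd_offset=2048.
+	 */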
+
+	table = rd_get_sg_table(dev, rd_page);
+	if (!table)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	rd_sg = &table->sg_table[rd_page - table->page_start_offset];
+
+	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+			dev->rd_dev_id,
+			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
+			cmd->t_task_lba, rd_size, rd_page, rd_offset);
+
+	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
+	    data_direction == DMA_TO_DEVICE) {
+		rc = rd_do_prot_rw(cmd, false);
+		if (rc)
+			return rc;
+	}
+
+	src_len = PAGE_SIZE - rd_offset;
+	sg_miter_start(&m, sgl, sgl_nents,
+			data_direction == DMA_FROM_DEVICE ?
+				SG_MITER_TO_SG : SG_MITER_FROM_SG);
+	while (rd_size) {
+		u32 len;
+		void *rd_addr;
+
+		sg_miter_next(&m);
+		if (!(u32)m.length) {
+			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
+				 dev->rd_dev_id, m.addr, m.length);
+			sg_miter_stop(&m);
+			return TCM_INCORRECT_AMOUNT_OF_DATA;
+		}
+		len = min((u32)m.length, src_len);
+		if (len > rd_size) {
+			pr_debug("RD[%u]: size underrun page %d offset %d "
+				 "size %d\n", dev->rd_dev_id,
+				 rd_page, rd_offset, rd_size);
+			len = rd_size;
+		}
+		m.consumed = len;
+
+		rd_addr = sg_virt(rd_sg) + rd_offset;
+
+		if (data_direction == DMA_FROM_DEVICE)
+			memcpy(m.addr, rd_addr, len);
+		else
+			memcpy(rd_addr, m.addr, len);
+
+		rd_size -= len;
+		if (!rd_size)
+			continue;
+
+		src_len -= len;
+		if (src_len) {
+			rd_offset += len;
+			continue;
+		}
+
+		/* rd page completed, next one please */
+		rd_page++;
+		rd_offset = 0;
+		src_len = PAGE_SIZE;
+		if (rd_page <= table->page_end_offset) {
+			rd_sg++;
+			continue;
+		}
+
+		table = rd_get_sg_table(dev, rd_page);
+		if (!table) {
+			sg_miter_stop(&m);
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+
+		/* since we increment, the first sg entry is correct */
+		rd_sg = table->sg_table;
+	}
+	sg_miter_stop(&m);
+
+	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
+	    data_direction == DMA_FROM_DEVICE) {
+		rc = rd_do_prot_rw(cmd, true);
+		if (rc)
+			return rc;
+	}
+
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
+	return 0;
+}
+
+enum {
+	Opt_rd_pages, Opt_rd_nullio, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_rd_pages, "rd_pages=%d"},
+	{Opt_rd_nullio, "rd_nullio=%d"},
+	{Opt_err, NULL}
+};
+
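+/*
+ * Parameters arrive through the backstore's configfs control attribute;
+ * illustrative usage (path assumed):
+ *
+ *	echo rd_pages=65536,rd_nullio=1 > \
+ *		/sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
+ */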
+static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_rd_pages:
+			match_int(args, &arg);
+			rd_dev->rd_page_count = arg;
+			pr_debug("RAMDISK: Referencing Page"
+				" Count: %u\n", rd_dev->rd_page_count);
+			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+			break;
+		case Opt_rd_nullio:
+			match_int(args, &arg);
+			if (arg != 1)
+				break;
+
+			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
+			rd_dev->rd_flags |= RDF_NULLIO;
+			break;
+		default:
+			break;
+		}
+	}
+
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
+			rd_dev->rd_dev_id);
+	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
+			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
+			PAGE_SIZE, rd_dev->sg_table_count,
+			!!(rd_dev->rd_flags & RDF_NULLIO));
+	return bl;
+}
+
+static sector_t rd_get_blocks(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
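+	/* Despite the name, this returns the last addressable LBA (count - 1). */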
+	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+			dev->dev_attrib.block_size) - 1;
+
+	return blocks_long;
+}
+
+static int rd_init_prot(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+        if (!dev->dev_attrib.pi_prot_type)
+		return 0;
+
+	return rd_build_prot_space(rd_dev, dev->prot_length,
+				   dev->dev_attrib.block_size);
+}
+
+static void rd_free_prot(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+	rd_release_prot_space(rd_dev);
+}
+
+static struct sbc_ops rd_sbc_ops = {
+	.execute_rw		= rd_execute_rw,
+};
+
+static sense_reason_t
+rd_parse_cdb(struct se_cmd *cmd)
+{
+	return sbc_parse_cdb(cmd, &rd_sbc_ops);
+}
+
+static const struct target_backend_ops rd_mcp_ops = {
+	.name			= "rd_mcp",
+	.inquiry_prod		= "RAMDISK-MCP",
+	.inquiry_rev		= RD_MCP_VERSION,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.alloc_device		= rd_alloc_device,
+	.configure_device	= rd_configure_device,
+	.free_device		= rd_free_device,
+	.parse_cdb		= rd_parse_cdb,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_device_type	= sbc_get_device_type,
+	.get_blocks		= rd_get_blocks,
+	.init_prot		= rd_init_prot,
+	.free_prot		= rd_free_prot,
+	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
+};
+
+int __init rd_module_init(void)
+{
+	return transport_backend_register(&rd_mcp_ops);
+}
+
+void rd_module_exit(void)
+{
+	target_backend_unregister(&rd_mcp_ops);
+}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
new file mode 100644
index 0000000..cc46a6a
--- /dev/null
+++ b/drivers/target/target_core_rd.h
@@ -0,0 +1,51 @@
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION		"v4.0"
+#define RD_MCP_VERSION		"4.0"
+
+/* Upper bound on a single scatterlist table allocation */
+#define RD_MAX_ALLOCATION_SIZE	65536
+#define RD_DEVICE_QUEUE_DEPTH	32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE		512
+
+/* Used in target_core_init_configfs() for virtual LUN 0 access */
+int __init rd_module_init(void);
+void rd_module_exit(void);
+
+struct rd_dev_sg_table {
+	u32		page_start_offset;
+	u32		page_end_offset;
+	u32		rd_sg_count;
+	struct scatterlist *sg_table;
+} ____cacheline_aligned;
+
+#define RDF_HAS_PAGE_COUNT	0x01
+#define RDF_NULLIO		0x02
+
+struct rd_dev {
+	struct se_device dev;
+	u32		rd_flags;
+	/* Unique Ramdisk Device ID in Ramdisk HBA */
+	u32		rd_dev_id;
+	/* Total page count for ramdisk device */
+	u32		rd_page_count;
+	/* Number of SG tables in sg_table_array */
+	u32		sg_table_count;
+	/* Number of SG tables in sg_prot_array */
+	u32		sg_prot_count;
+	/* Array of rd_dev_sg_table_t containing scatterlists */
+	struct rd_dev_sg_table *sg_table_array;
+	/* Array of rd_dev_sg_table containing protection scatterlists */
+	struct rd_dev_sg_table *sg_prot_array;
+	/* Ramdisk HBA device is connected to */
+	struct rd_host *rd_host;
+} ____cacheline_aligned;
+
+struct rd_host {
+	u32		rd_host_dev_id_count;
+	u32		rd_host_id;		/* Unique Ramdisk Host ID */
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_RD_H */
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
new file mode 100644
index 0000000..6081178
--- /dev/null
+++ b/drivers/target/target_core_sbc.c
@@ -0,0 +1,1483 @@
+/*
+ * SCSI Block Commands (SBC) parsing and emulation.
+ *
+ * (c) Copyright 2002-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/crc-t10dif.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_ua.h"
+#include "target_core_alua.h"
+
+static sense_reason_t
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
+
+static sense_reason_t
+sbc_emulate_readcapacity(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *cdb = cmd->t_task_cdb;
+	unsigned long long blocks_long = dev->transport->get_blocks(dev);
+	unsigned char *rbuf;
+	unsigned char buf[8];
+	u32 blocks;
+
+	/*
+	 * SBC-2 says:
+	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
+	 *   ADDRESS field is not set to zero, the device server shall
+	 *   terminate the command with CHECK CONDITION status with
+	 *   the sense key set to ILLEGAL REQUEST and the additional
+	 *   sense code set to INVALID FIELD IN CDB.
+	 *
+	 * In SBC-3, these fields are obsolete, but some SCSI
+	 * compliance tests actually check this, so we might as well
+	 * follow SBC-2.
+	 */
+	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
+		return TCM_INVALID_CDB_FIELD;
+
+	if (blocks_long >= 0x00000000ffffffff)
+		blocks = 0xffffffff;
+	else
+		blocks = (u32)blocks_long;
+
+	buf[0] = (blocks >> 24) & 0xff;
+	buf[1] = (blocks >> 16) & 0xff;
+	buf[2] = (blocks >> 8) & 0xff;
+	buf[3] = blocks & 0xff;
+	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
+	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
+	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
+	buf[7] = dev->dev_attrib.block_size & 0xff;
+
+	rbuf = transport_kmap_data_sg(cmd);
+	if (rbuf) {
+		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+		transport_kunmap_data_sg(cmd);
+	}
+
+	target_complete_cmd_with_length(cmd, GOOD, 8);
+	return 0;
+}
+
+static sense_reason_t
+sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int pi_prot_type = dev->dev_attrib.pi_prot_type;
+	unsigned char *rbuf;
+	unsigned char buf[32];
+	unsigned long long blocks = dev->transport->get_blocks(dev);
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (blocks >> 56) & 0xff;
+	buf[1] = (blocks >> 48) & 0xff;
+	buf[2] = (blocks >> 40) & 0xff;
+	buf[3] = (blocks >> 32) & 0xff;
+	buf[4] = (blocks >> 24) & 0xff;
+	buf[5] = (blocks >> 16) & 0xff;
+	buf[6] = (blocks >> 8) & 0xff;
+	buf[7] = blocks & 0xff;
+	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
+	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
+	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
+	buf[11] = dev->dev_attrib.block_size & 0xff;
+	/*
+	 * Set P_TYPE and PROT_EN bits for DIF support
+	 */
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		/*
+		 * Only override a device's pi_prot_type if no T10-PI is
+		 * available, and sess_prot_type has been explicitly enabled.
+		 */
+		if (!pi_prot_type)
+			pi_prot_type = sess->sess_prot_type;
+
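+		/*
+		 * Encoding example (illustrative): DIF Type 1 yields P_TYPE=0,
+		 * PROT_EN=1 -> buf[12] = 0x01; Type 3 yields P_TYPE=2,
+		 * PROT_EN=1 -> buf[12] = 0x05.
+		 */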
+		if (pi_prot_type)
+			buf[12] = (pi_prot_type - 1) << 1 | 0x1;
+	}
+
+	if (dev->transport->get_lbppbe)
+		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+	if (dev->transport->get_alignment_offset_lbas) {
+		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+		buf[14] = (lalba >> 8) & 0x3f;
+		buf[15] = lalba & 0xff;
+	}
+
+	/*
+	 * Set Thin Provisioning Enable bit following sbc3r22 in section
+	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+	 */
+	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
+		buf[14] |= 0x80;
+
+	rbuf = transport_kmap_data_sg(cmd);
+	if (rbuf) {
+		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+		transport_kunmap_data_sg(cmd);
+	}
+
+	target_complete_cmd_with_length(cmd, GOOD, 32);
+	return 0;
+}
+
+static sense_reason_t
+sbc_emulate_startstop(struct se_cmd *cmd)
+{
+	unsigned char *cdb = cmd->t_task_cdb;
+
+	/*
+	 * See sbc3r36 section 5.25
+	 * Immediate bit should be set since there is nothing to complete
+	 * POWER CONDITION MODIFIER 0h
+	 */
+	if (!(cdb[1] & 1) || cdb[2] || cdb[3])
+		return TCM_INVALID_CDB_FIELD;
+
+	/*
+	 * See sbc3r36 section 5.25
+	 * POWER CONDITION 0h START_VALID - process START and LOEJ
+	 */
+	if (cdb[4] >> 4 & 0xf)
+		return TCM_INVALID_CDB_FIELD;
+
+	/*
+	 * See sbc3r36 section 5.25
+	 * LOEJ 0h - nothing to load or unload
+	 * START 1h - we are ready
+	 */
+	if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
+		return TCM_INVALID_CDB_FIELD;
+
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
+	return 0;
+}
+
+sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
+{
+	u32 num_blocks;
+
+	if (cmd->t_task_cdb[0] == WRITE_SAME)
+		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
+	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
+	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+
+	/*
+	 * Use the explicit range when a non-zero value is supplied; otherwise
+	 * calculate the remaining range based on ->get_blocks() - starting LBA.
+	 */
+	if (num_blocks)
+		return num_blocks;
+
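+	/*
+	 * Example (illustrative): ->get_blocks() returns the last LBA, so on
+	 * a device whose last LBA is 999, a WRITE_SAME at LBA 990 with a zero
+	 * NUMBER OF BLOCKS field covers 999 - 990 + 1 = 10 blocks.
+	 */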
+	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
+		cmd->t_task_lba + 1;
+}
+EXPORT_SYMBOL(sbc_get_write_same_sectors);
+
+static sense_reason_t
+sbc_execute_write_same_unmap(struct se_cmd *cmd)
+{
+	struct sbc_ops *ops = cmd->protocol_data;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+	sense_reason_t ret;
+
+	if (nolb) {
+		ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
+		if (ret)
+			return ret;
+	}
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+static sense_reason_t
+sbc_emulate_noop(struct se_cmd *cmd)
+{
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
+{
+	return cmd->se_dev->dev_attrib.block_size * sectors;
+}
+
+static inline u32 transport_get_sectors_6(unsigned char *cdb)
+{
+	/*
+	 * Use 8-bit sector value.  SBC-3 says:
+	 *
+	 *   A TRANSFER LENGTH field set to zero specifies that 256
+	 *   logical blocks shall be written.  Any other value
+	 *   specifies the number of logical blocks that shall be
+	 *   written.
+	 */
+	return cdb[4] ? : 256;
+}
+
+static inline u32 transport_get_sectors_10(unsigned char *cdb)
+{
+	return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(unsigned char *cdb)
+{
+	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(unsigned char *cdb)
+{
+	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+		    (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(unsigned char *cdb)
+{
+	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+		    (cdb[30] << 8) + cdb[31];
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+static sense_reason_t
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+{
+	struct se_device *dev = cmd->se_dev;
+	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
+	unsigned int sectors = sbc_get_write_same_sectors(cmd);
+	sense_reason_t ret;
+
+	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+		pr_err("WRITE_SAME PBDATA and LBDATA"
+			" bits not supported for Block Discard"
+			" Emulation\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
+		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
+			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+		return TCM_INVALID_CDB_FIELD;
+	}
+	/*
+	 * Sanity check for LBA wrap and request past end of device.
+	 */
+	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+	    ((cmd->t_task_lba + sectors) > end_lba)) {
+		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
+		return TCM_ADDRESS_OUT_OF_RANGE;
+	}
+
+	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+	if (flags[0] & 0x10) {
+		pr_warn("WRITE SAME with ANCHOR not supported\n");
+		return TCM_INVALID_CDB_FIELD;
+	}
+	/*
+	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
+	 * translated into block discard requests within backend code.
+	 */
+	if (flags[0] & 0x08) {
+		if (!ops->execute_unmap)
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+		if (!dev->dev_attrib.emulate_tpws) {
+			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
+			       " has emulate_tpws disabled\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+		cmd->execute_cmd = sbc_execute_write_same_unmap;
+		return 0;
+	}
+	if (!ops->execute_write_same)
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+	if (ret)
+		return ret;
+
+	cmd->execute_cmd = ops->execute_write_same;
+	return 0;
+}
+
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
+					   int *post_ret)
+{
+	unsigned char *buf, *addr;
+	struct scatterlist *sg;
+	unsigned int offset;
+	sense_reason_t ret = TCM_NO_SENSE;
+	int i, count;
+	/*
+	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+	 *
+	 * 1) read the specified logical block(s);
+	 * 2) transfer logical blocks from the data-out buffer;
+	 * 3) XOR the logical blocks transferred from the data-out buffer with
+	 *    the logical blocks read, storing the resulting XOR data in a buffer;
+	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+	 *    blocks transferred from the data-out buffer; and
+	 * 5) transfer the resulting XOR data to the data-in buffer.
+	 */
+	buf = kmalloc(cmd->data_length, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate xor_callback buf\n");
+		return TCM_OUT_OF_RESOURCES;
+	}
+	/*
+	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
+	 * into the locally allocated *buf
+	 */
+	sg_copy_to_buffer(cmd->t_data_sg,
+			  cmd->t_data_nents,
+			  buf,
+			  cmd->data_length);
+
+	/*
+	 * Now perform the XOR against the BIDI read memory located at
+	 * cmd->t_bidi_data_sg.
+	 */
+
+	offset = 0;
+	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+		addr = kmap_atomic(sg_page(sg));
+		if (!addr) {
+			ret = TCM_OUT_OF_RESOURCES;
+			goto out;
+		}
+
+		for (i = 0; i < sg->length; i++)
+			*(addr + sg->offset + i) ^= *(buf + offset + i);
+
+		offset += sg->length;
+		kunmap_atomic(addr);
+	}
+
+out:
+	kfree(buf);
+	return ret;
+}
+
+static sense_reason_t
+sbc_execute_rw(struct se_cmd *cmd)
+{
+	struct sbc_ops *ops = cmd->protocol_data;
+
+	return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
+			       cmd->data_direction);
+}
+
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+					     int *post_ret)
+{
+	struct se_device *dev = cmd->se_dev;
+	sense_reason_t ret = TCM_NO_SENSE;
+
+	/*
+	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
+	 * within target_complete_ok_work() if the command was successfully
+	 * sent to the backend driver.
+	 */
+	spin_lock_irq(&cmd->t_state_lock);
+	if (cmd->transport_state & CMD_T_SENT) {
+		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+		*post_ret = 1;
+
+		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	spin_unlock_irq(&cmd->t_state_lock);
+
+	/*
+	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
+	 * before the original READ I/O submission.
+	 */
+	up(&dev->caw_sem);
+
+	return ret;
+}
+
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+						 int *post_ret)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct scatterlist *write_sg = NULL, *sg;
+	unsigned char *buf = NULL, *addr;
+	struct sg_mapping_iter m;
+	unsigned int offset = 0, len;
+	unsigned int nlbas = cmd->t_task_nolb;
+	unsigned int block_size = dev->dev_attrib.block_size;
+	unsigned int compare_len = (nlbas * block_size);
+	sense_reason_t ret = TCM_NO_SENSE;
+	int rc, i;
+
+	/*
+	 * Handle early failure in transport_generic_request_failure(),
+	 * which will not have taken ->caw_sem yet..
+	 */
+	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
+		return TCM_NO_SENSE;
+	/*
+	 * Handle special case for zero-length COMPARE_AND_WRITE
+	 */
+	if (!cmd->data_length)
+		goto out;
+	/*
+	 * Immediately exit + release dev->caw_sem if command has already
+	 * been failed with a non-zero SCSI status.
+	 */
+	if (cmd->scsi_status) {
+		pr_debug("compare_and_write_callback: non zero scsi_status:"
+			" 0x%02x\n", cmd->scsi_status);
+		*post_ret = 1;
+		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto out;
+	}
+
+	buf = kzalloc(cmd->data_length, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate compare_and_write buf\n");
+		ret = TCM_OUT_OF_RESOURCES;
+		goto out;
+	}
+
+	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+			   GFP_KERNEL);
+	if (!write_sg) {
+		pr_err("Unable to allocate compare_and_write sg\n");
+		ret = TCM_OUT_OF_RESOURCES;
+		goto out;
+	}
+	sg_init_table(write_sg, cmd->t_data_nents);
+	/*
+	 * Setup verify and write data payloads from total NumberLBAs.
+	 */
+	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
+			       cmd->data_length);
+	if (!rc) {
+		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
+		ret = TCM_OUT_OF_RESOURCES;
+		goto out;
+	}
+	/*
+	 * Compare the SCSI READ payload against the verify payload
+	 */
+	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
+		addr = (unsigned char *)kmap_atomic(sg_page(sg));
+		if (!addr) {
+			ret = TCM_OUT_OF_RESOURCES;
+			goto out;
+		}
+
+		len = min(sg->length, compare_len);
+
+		if (memcmp(addr, buf + offset, len)) {
+			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
+				addr, buf + offset);
+			kunmap_atomic(addr);
+			goto miscompare;
+		}
+		kunmap_atomic(addr);
+
+		offset += len;
+		compare_len -= len;
+		if (!compare_len)
+			break;
+	}
+
+	i = 0;
+	len = cmd->t_task_nolb * block_size;
+	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
+	/*
+	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.
+	 */
+	while (len) {
+		sg_miter_next(&m);
+
+		if (block_size < PAGE_SIZE) {
+			sg_set_page(&write_sg[i], m.page, block_size,
+				    m.piter.sg->offset + block_size);
+		} else {
+			sg_miter_next(&m);
+			sg_set_page(&write_sg[i], m.page, block_size,
+				    m.piter.sg->offset);
+		}
+		len -= block_size;
+		i++;
+	}
+	sg_miter_stop(&m);
+	/*
+	 * Save the original SGL + nents values before updating to new
+	 * assignments, to be released in transport_free_pages() ->
+	 * transport_reset_sgl_orig()
+	 */
+	cmd->t_data_sg_orig = cmd->t_data_sg;
+	cmd->t_data_sg = write_sg;
+	cmd->t_data_nents_orig = cmd->t_data_nents;
+	cmd->t_data_nents = 1;
+
+	cmd->sam_task_attr = TCM_HEAD_TAG;
+	cmd->transport_complete_callback = compare_and_write_post;
+	/*
+	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
+	 * for submitting the adjusted SGL to write instance user-data.
+	 */
+	cmd->execute_cmd = sbc_execute_rw;
+
+	spin_lock_irq(&cmd->t_state_lock);
+	cmd->t_state = TRANSPORT_PROCESSING;
+	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+	spin_unlock_irq(&cmd->t_state_lock);
+
+	__target_execute_cmd(cmd, false);
+
+	kfree(buf);
+	return ret;
+
+miscompare:
+	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
+		dev->transport->name);
+	ret = TCM_MISCOMPARE_VERIFY;
+out:
+	/*
+	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
+	 * sbc_compare_and_write() before the original READ I/O submission.
+	 */
+	up(&dev->caw_sem);
+	kfree(write_sg);
+	kfree(buf);
+	return ret;
+}
+
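+/*
+ * Flow summary (descriptive): sbc_compare_and_write() takes ->caw_sem and
+ * submits the READ into cmd->t_bidi_data_sg; compare_and_write_callback()
+ * compares the read-back blocks against the verify payload in
+ * cmd->t_data_sg and, on a match, re-queues the command as a WRITE via
+ * sbc_execute_rw(); compare_and_write_post() releases ->caw_sem once the
+ * WRITE completes.
+ */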
+static sense_reason_t
+sbc_compare_and_write(struct se_cmd *cmd)
+{
+	struct sbc_ops *ops = cmd->protocol_data;
+	struct se_device *dev = cmd->se_dev;
+	sense_reason_t ret;
+	int rc;
+	/*
+	 * Submit the READ first for COMPARE_AND_WRITE to perform the
+	 * comparison using SGLs at cmd->t_bidi_data_sg.
+	 */
+	rc = down_interruptible(&dev->caw_sem);
+	if (rc != 0) {
+		cmd->transport_complete_callback = NULL;
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * Reset cmd->data_length to individual block_size in order to not
+	 * confuse backend drivers that depend on this value matching the
+	 * size of the I/O being submitted.
+	 */
+	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
+
+	ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
+			      DMA_FROM_DEVICE);
+	if (ret) {
+		cmd->transport_complete_callback = NULL;
+		up(&dev->caw_sem);
+		return ret;
+	}
+	/*
+	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
+	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
+	 * of WRITE instance user-data.
+	 */
+	return TCM_NO_SENSE;
+}
+
+static int
+sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
+		       bool is_write, struct se_cmd *cmd)
+{
+	if (is_write) {
+		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
+			       protect ? TARGET_PROT_DOUT_PASS :
+			       TARGET_PROT_DOUT_INSERT;
+		switch (protect) {
+		case 0x0:
+		case 0x3:
+			cmd->prot_checks = 0;
+			break;
+		case 0x1:
+		case 0x5:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x2:
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x4:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			break;
+		default:
+			pr_err("Unsupported protect field %d\n", protect);
+			return -EINVAL;
+		}
+	} else {
+		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
+			       protect ? TARGET_PROT_DIN_PASS :
+			       TARGET_PROT_DIN_STRIP;
+		switch (protect) {
+		case 0x0:
+		case 0x1:
+		case 0x5:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x2:
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x3:
+			cmd->prot_checks = 0;
+			break;
+		case 0x4:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			break;
+		default:
+			pr_err("Unsupported protect field %d\n", protect);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static sense_reason_t
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+	       u32 sectors, bool is_write)
+{
+	u8 protect = cdb[1] >> 5;
+	int sp_ops = cmd->se_sess->sup_prot_ops;
+	int pi_prot_type = dev->dev_attrib.pi_prot_type;
+	bool fabric_prot = false;
+
+	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
+		if (unlikely(protect &&
+		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
+			pr_err("CDB contains protect bit, but device + fabric does"
+			       " not advertise PROTECT=1 feature bit\n");
+			return TCM_INVALID_CDB_FIELD;
+		}
+		if (cmd->prot_pto)
+			return TCM_NO_SENSE;
+	}
+
+	switch (dev->dev_attrib.pi_prot_type) {
+	case TARGET_DIF_TYPE3_PROT:
+		cmd->reftag_seed = 0xffffffff;
+		break;
+	case TARGET_DIF_TYPE2_PROT:
+		if (protect)
+			return TCM_INVALID_CDB_FIELD;
+
+		cmd->reftag_seed = cmd->t_task_lba;
+		break;
+	case TARGET_DIF_TYPE1_PROT:
+		cmd->reftag_seed = cmd->t_task_lba;
+		break;
+	case TARGET_DIF_TYPE0_PROT:
+		/*
+		 * See if the fabric supports T10-PI, and whether the session
+		 * has been configured to allow exporting the PROTECT=1 feature
+		 * bit with backend devices that don't support T10-PI.
+		 */
+		fabric_prot = is_write ?
+			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
+			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));
+
+		if (fabric_prot && cmd->se_sess->sess_prot_type) {
+			pi_prot_type = cmd->se_sess->sess_prot_type;
+			break;
+		}
+		if (!protect)
+			return TCM_NO_SENSE;
+		/* Fallthrough */
+	default:
+		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
+		       "PROTECT: 0x%02x\n", cdb[0], protect);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
+		return TCM_INVALID_CDB_FIELD;
+
+	cmd->prot_type = pi_prot_type;
+	cmd->prot_length = dev->prot_length * sectors;
+
+	/*
+	 * If protection information is transferred over the wire, adjust
+	 * the command data length to describe pure data; the actual
+	 * transfer length is the data length plus the protection length.
+	 */
+	if (protect)
+		cmd->data_length = sectors * dev->dev_attrib.block_size;
+
+	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
+		 "prot_op=%d prot_checks=%d\n",
+		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
+		 cmd->prot_op, cmd->prot_checks);
+
+	return TCM_NO_SENSE;
+}
+
+static int
+sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+	if (cdb[1] & 0x10) {
+		/* see explanation in spc_emulate_modesense */
+		if (!target_check_fua(dev)) {
+			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
+			       " does not advertise support for DPO\n", cdb[0]);
+			return -EINVAL;
+		}
+	}
+	if (cdb[1] & 0x8) {
+		if (!target_check_fua(dev)) {
+			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
+			       " does not advertise support for FUA write\n",
+			       cdb[0]);
+			return -EINVAL;
+		}
+		cmd->se_cmd_flags |= SCF_FUA;
+	}
+	return 0;
+}
+
+sense_reason_t
+sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *cdb = cmd->t_task_cdb;
+	unsigned int size;
+	u32 sectors = 0;
+	sense_reason_t ret;
+
+	cmd->protocol_data = ops;
+
+	switch (cdb[0]) {
+	case READ_6:
+		sectors = transport_get_sectors_6(cdb);
+		cmd->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		cmd->execute_cmd = sbc_execute_rw;
+		break;
+	case READ_10:
+		sectors = transport_get_sectors_10(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
+
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		cmd->execute_cmd = sbc_execute_rw;
+		break;
+	case READ_12:
+		sectors = transport_get_sectors_12(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
+
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		cmd->execute_cmd = sbc_execute_rw;
+		break;
+	case READ_16:
+		sectors = transport_get_sectors_16(cdb);
+		cmd->t_task_lba = transport_lba_64(cdb);
+
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
+
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		cmd->execute_cmd = sbc_execute_rw;
+		break;
+	case WRITE_6:
+		sectors = transport_get_sectors_6(cdb);
+		cmd->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		cmd->execute_cmd = sbc_execute_rw;
+		break;
+	case WRITE_10:
+	case WRITE_VERIFY:
+		sectors = transport_get_sectors_10(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
+
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		cmd->execute_cmd = sbc_execute_rw;
+		break;
+	case WRITE_12:
+		sectors = transport_get_sectors_12(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
+
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		cmd->execute_cmd = sbc_execute_rw;
+		break;
+	case WRITE_16:
+		sectors = transport_get_sectors_16(cdb);
+		cmd->t_task_lba = transport_lba_64(cdb);
+
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
+
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+		cmd->execute_cmd = sbc_execute_rw;
+		break;
+	case XDWRITEREAD_10:
+		if (cmd->data_direction != DMA_TO_DEVICE ||
+		    !(cmd->se_cmd_flags & SCF_BIDI))
+			return TCM_INVALID_CDB_FIELD;
+		sectors = transport_get_sectors_10(cdb);
+
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		cmd->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+
+		/*
+		 * Set up the BIDI XOR callback to be run after I/O completion.
+		 */
+		cmd->execute_cmd = sbc_execute_rw;
+		cmd->transport_complete_callback = &xdreadwrite_callback;
+		break;
+	case VARIABLE_LENGTH_CMD:
+	{
+		u16 service_action = get_unaligned_be16(&cdb[8]);
+		switch (service_action) {
+		case XDWRITEREAD_32:
+			sectors = transport_get_sectors_32(cdb);
+
+			if (sbc_check_dpofua(dev, cmd, cdb))
+				return TCM_INVALID_CDB_FIELD;
+			/*
+			 * Use WRITE_32 and READ_32 opcodes for the emulated
+			 * XDWRITE_READ_32 logic.
+			 */
+			cmd->t_task_lba = transport_lba_64_ext(cdb);
+			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+
+			/*
+			 * Set up the BIDI XOR callback to be run after I/O
+			 * completion.
+			 */
+			cmd->execute_cmd = sbc_execute_rw;
+			cmd->transport_complete_callback = &xdreadwrite_callback;
+			break;
+		case WRITE_SAME_32:
+			sectors = transport_get_sectors_32(cdb);
+			if (!sectors) {
+				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+				       " supported\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
+
+			size = sbc_get_size(cmd, 1);
+			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
+
+			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+			if (ret)
+				return ret;
+			break;
+		default:
+			pr_err("VARIABLE_LENGTH_CMD service action"
+				" 0x%04x not supported\n", service_action);
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+		break;
+	}
+	case COMPARE_AND_WRITE:
+		sectors = cdb[13];
+		/*
+		 * Currently enforce COMPARE_AND_WRITE for a single sector
+		 */
+		if (sectors > 1) {
+			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
+			       " than 1\n", sectors);
+			return TCM_INVALID_CDB_FIELD;
+		}
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		/*
+		 * Double size because we have two buffers, note that
+		 * zero is not an error..
+		 */
+		size = 2 * sbc_get_size(cmd, sectors);
+		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+		cmd->t_task_nolb = sectors;
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
+		cmd->execute_cmd = sbc_compare_and_write;
+		cmd->transport_complete_callback = compare_and_write_callback;
+		break;
+	case READ_CAPACITY:
+		size = READ_CAP_LEN;
+		cmd->execute_cmd = sbc_emulate_readcapacity;
+		break;
+	case SERVICE_ACTION_IN_16:
+		switch (cmd->t_task_cdb[1] & 0x1f) {
+		case SAI_READ_CAPACITY_16:
+			cmd->execute_cmd = sbc_emulate_readcapacity_16;
+			break;
+		case SAI_REPORT_REFERRALS:
+			cmd->execute_cmd = target_emulate_report_referrals;
+			break;
+		default:
+			pr_err("Unsupported SA: 0x%02x\n",
+				cmd->t_task_cdb[1] & 0x1f);
+			return TCM_INVALID_CDB_FIELD;
+		}
+		size = (cdb[10] << 24) | (cdb[11] << 16) |
+		       (cdb[12] << 8) | cdb[13];
+		break;
+	case SYNCHRONIZE_CACHE:
+	case SYNCHRONIZE_CACHE_16:
+		if (cdb[0] == SYNCHRONIZE_CACHE) {
+			sectors = transport_get_sectors_10(cdb);
+			cmd->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb);
+			cmd->t_task_lba = transport_lba_64(cdb);
+		}
+		if (ops->execute_sync_cache) {
+			cmd->execute_cmd = ops->execute_sync_cache;
+			goto check_lba;
+		}
+		size = 0;
+		cmd->execute_cmd = sbc_emulate_noop;
+		break;
+	case UNMAP:
+		if (!ops->execute_unmap)
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+		if (!dev->dev_attrib.emulate_tpu) {
+			pr_err("Got UNMAP, but backend device has"
+			       " emulate_tpu disabled\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+		size = get_unaligned_be16(&cdb[7]);
+		cmd->execute_cmd = sbc_execute_unmap;
+		break;
+	case WRITE_SAME_16:
+		sectors = transport_get_sectors_16(cdb);
+		if (!sectors) {
+			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+			return TCM_INVALID_CDB_FIELD;
+		}
+
+		size = sbc_get_size(cmd, 1);
+		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+
+		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+		if (ret)
+			return ret;
+		break;
+	case WRITE_SAME:
+		sectors = transport_get_sectors_10(cdb);
+		if (!sectors) {
+			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+			return TCM_INVALID_CDB_FIELD;
+		}
+
+		size = sbc_get_size(cmd, 1);
+		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
+
+		/*
+		 * Follow sbcr26 for WRITE_SAME (10) and check the UNMAP bit
+		 * (byte 1, bit 3) in place of the original reserved field.
+		 */
+		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+		if (ret)
+			return ret;
+		break;
+	case VERIFY:
+	case VERIFY_16:
+		size = 0;
+		if (cdb[0] == VERIFY) {
+			sectors = transport_get_sectors_10(cdb);
+			cmd->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb);
+			cmd->t_task_lba = transport_lba_64(cdb);
+		}
+		cmd->execute_cmd = sbc_emulate_noop;
+		goto check_lba;
+	case REZERO_UNIT:
+	case SEEK_6:
+	case SEEK_10:
+		/*
+		 * There are still clients out there which use these old SCSI-2
+		 * commands. This mainly happens when running VMs with legacy
+		 * guest systems, connected via SCSI command pass-through to
+		 * iSCSI targets. Make them happy and return status GOOD.
+		 */
+		size = 0;
+		cmd->execute_cmd = sbc_emulate_noop;
+		break;
+	case START_STOP:
+		size = 0;
+		cmd->execute_cmd = sbc_emulate_startstop;
+		break;
+	default:
+		ret = spc_parse_cdb(cmd, &size);
+		if (ret)
+			return ret;
+	}
+
+	/* reject any command that we don't have a handler for */
+	if (!cmd->execute_cmd)
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+		unsigned long long end_lba;
+check_lba:
+		end_lba = dev->transport->get_blocks(dev) + 1;
+		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+		    ((cmd->t_task_lba + sectors) > end_lba)) {
+			pr_err("cmd exceeds last lba %llu "
+				"(lba %llu, sectors %u)\n",
+				end_lba, cmd->t_task_lba, sectors);
+			return TCM_ADDRESS_OUT_OF_RANGE;
+		}
+
+		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
+			size = sbc_get_size(cmd, sectors);
+	}
+
+	return target_cmd_size_check(cmd, size);
+}
+EXPORT_SYMBOL(sbc_parse_cdb);
+
+u32 sbc_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+EXPORT_SYMBOL(sbc_get_device_type);
+
+static sense_reason_t
+sbc_execute_unmap(struct se_cmd *cmd)
+{
+	struct sbc_ops *ops = cmd->protocol_data;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf, *ptr = NULL;
+	sector_t lba;
+	int size;
+	u32 range;
+	sense_reason_t ret = 0;
+	int dl, bd_dl;
+
+	/* We never set ANC_SUP */
+	if (cmd->t_task_cdb[1])
+		return TCM_INVALID_CDB_FIELD;
+
+	if (cmd->data_length == 0) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+
+	if (cmd->data_length < 8) {
+		pr_warn("UNMAP parameter list length %u too small\n",
+			cmd->data_length);
+		return TCM_PARAMETER_LIST_LENGTH_ERROR;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
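+	/*
+	 * UNMAP parameter list layout (SBC-3): bytes 0-1 UNMAP DATA LENGTH,
+	 * bytes 2-3 BLOCK DESCRIPTOR DATA LENGTH, bytes 4-7 reserved; block
+	 * descriptors start at byte 8, each 16 bytes: an 8-byte LBA, a
+	 * 4-byte block count, and 4 reserved bytes.
+	 */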
+	dl = get_unaligned_be16(&buf[0]);
+	bd_dl = get_unaligned_be16(&buf[2]);
+
+	size = cmd->data_length - 8;
+	if (bd_dl > size)
+		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
+			cmd->data_length, bd_dl);
+	else
+		size = bd_dl;
+
+	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto err;
+	}
+
+	/* First UNMAP block descriptor starts at 8 byte offset */
+	ptr = &buf[8];
+	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
+		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+	while (size >= 16) {
+		lba = get_unaligned_be64(&ptr[0]);
+		range = get_unaligned_be32(&ptr[8]);
+		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
+				 (unsigned long long)lba, range);
+
+		if (range > dev->dev_attrib.max_unmap_lba_count) {
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto err;
+		}
+
+		if (lba + range > dev->transport->get_blocks(dev) + 1) {
+			ret = TCM_ADDRESS_OUT_OF_RANGE;
+			goto err;
+		}
+
+		ret = ops->execute_unmap(cmd, lba, range);
+		if (ret)
+			goto err;
+
+		ptr += 16;
+		size -= 16;
+	}
+
+err:
+	transport_kunmap_data_sg(cmd);
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
+	return ret;
+}
+
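+/*
+ * Each T10-PI tuple is 8 bytes on the wire: a 2-byte GUARD tag (CRC16
+ * T10-DIF over one logical block of data), a 2-byte APPLICATION tag, and
+ * a 4-byte REFERENCE tag, all big-endian.  For Type 1 protection the
+ * reference tag carries the low 32 bits of the LBA.
+ */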
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_pi_tuple *sdt;
+	struct scatterlist *dsg = cmd->t_data_sg, *psg;
+	sector_t sector = cmd->t_task_lba;
+	void *daddr, *paddr;
+	int i, j, offset = 0;
+	unsigned int block_size = dev->dev_attrib.block_size;
+
+	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+
+		for (j = 0; j < psg->length;
+				j += sizeof(*sdt)) {
+			__u16 crc;
+			unsigned int avail;
+
+			if (offset >= dsg->length) {
+				offset -= dsg->length;
+				kunmap_atomic(daddr - dsg->offset);
+				dsg = sg_next(dsg);
+				if (!dsg) {
+					kunmap_atomic(paddr - psg->offset);
+					return;
+				}
+				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+			}
+
+			sdt = paddr + j;
+			avail = min(block_size, dsg->length - offset);
+			crc = crc_t10dif(daddr + offset, avail);
+			if (avail < block_size) {
+				kunmap_atomic(daddr - dsg->offset);
+				dsg = sg_next(dsg);
+				if (!dsg) {
+					kunmap_atomic(paddr - psg->offset);
+					return;
+				}
+				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+				offset = block_size - avail;
+				crc = crc_t10dif_update(crc, daddr, offset);
+			} else {
+				offset += block_size;
+			}
+
+			sdt->guard_tag = cpu_to_be16(crc);
+			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
+				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+			sdt->app_tag = 0;
+
+			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (cmd->data_direction == DMA_TO_DEVICE) ?
+				 "WRITE" : "READ", (unsigned long long)sector,
+				 sdt->guard_tag, sdt->app_tag,
+				 be32_to_cpu(sdt->ref_tag));
+
+			sector++;
+		}
+
+		kunmap_atomic(daddr - dsg->offset);
+		kunmap_atomic(paddr - psg->offset);
+	}
+}
+
+static sense_reason_t
+sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
+		  __u16 crc, sector_t sector, unsigned int ei_lba)
+{
+	__be16 csum;
+
+	if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+		goto check_ref;
+
+	csum = cpu_to_be16(crc);
+
+	if (sdt->guard_tag != csum) {
+		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
+			" csum 0x%04x\n", (unsigned long long)sector,
+			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+	}
+
+check_ref:
+	if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
+		return 0;
+
+	if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
+	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
+		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
+		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
+		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+	}
+
+	if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
+	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
+		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
+		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
+			be32_to_cpu(sdt->ref_tag), ei_lba);
+		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+	}
+
+	return 0;
+}
+
+void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+		       struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct scatterlist *psg;
+	void *paddr, *addr;
+	unsigned int i, len, left;
+	unsigned int offset = sg_off;
+
+	if (!sg)
+		return;
+
+	left = sectors * dev->prot_length;
+
+	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+		unsigned int psg_len, copied = 0;
+
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+		psg_len = min(left, psg->length);
+		while (psg_len) {
+			len = min(psg_len, sg->length - offset);
+			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
+
+			if (read)
+				memcpy(paddr + copied, addr, len);
+			else
+				memcpy(addr, paddr + copied, len);
+
+			left -= len;
+			offset += len;
+			copied += len;
+			psg_len -= len;
+
+			kunmap_atomic(addr - sg->offset - offset);
+
+			if (offset >= sg->length) {
+				sg = sg_next(sg);
+				offset = 0;
+			}
+		}
+		kunmap_atomic(paddr - psg->offset);
+	}
+}
+EXPORT_SYMBOL(sbc_dif_copy_prot);
+
+sense_reason_t
+sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+	       unsigned int ei_lba, struct scatterlist *psg, int psg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_pi_tuple *sdt;
+	struct scatterlist *dsg = cmd->t_data_sg;
+	sector_t sector = start;
+	void *daddr, *paddr;
+	int i;
+	sense_reason_t rc;
+	int dsg_off = 0;
+	unsigned int block_size = dev->dev_attrib.block_size;
+
+	for (; psg && sector < start + sectors; psg = sg_next(psg)) {
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+
+		for (i = psg_off; i < psg->length &&
+				sector < start + sectors;
+				i += sizeof(*sdt)) {
+			__u16 crc;
+			unsigned int avail;
+
+			if (dsg_off >= dsg->length) {
+				dsg_off -= dsg->length;
+				kunmap_atomic(daddr - dsg->offset);
+				dsg = sg_next(dsg);
+				if (!dsg) {
+					kunmap_atomic(paddr - psg->offset);
+					return 0;
+				}
+				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+			}
+
+			sdt = paddr + i;
+
+			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			if (sdt->app_tag == cpu_to_be16(0xffff)) {
+				dsg_off += block_size;
+				goto next;
+			}
+
+			avail = min(block_size, dsg->length - dsg_off);
+			crc = crc_t10dif(daddr + dsg_off, avail);
+			if (avail < block_size) {
+				kunmap_atomic(daddr - dsg->offset);
+				dsg = sg_next(dsg);
+				if (!dsg) {
+					kunmap_atomic(paddr - psg->offset);
+					return 0;
+				}
+				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+				dsg_off = block_size - avail;
+				crc = crc_t10dif_update(crc, daddr, dsg_off);
+			} else {
+				dsg_off += block_size;
+			}
+
+			rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
+			if (rc) {
+				kunmap_atomic(daddr - dsg->offset);
+				kunmap_atomic(paddr - psg->offset);
+				cmd->bad_sector = sector;
+				return rc;
+			}
+next:
+			sector++;
+			ei_lba++;
+		}
+
+		psg_off = 0;
+		kunmap_atomic(daddr - dsg->offset);
+		kunmap_atomic(paddr - psg->offset);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
new file mode 100644
index 0000000..9413e1a
--- /dev/null
+++ b/drivers/target/target_core_spc.c
@@ -0,0 +1,1418 @@
+/*
+ * SCSI Primary Commands (SPC) parsing and emulation.
+ *
+ * (c) Copyright 2002-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+#include "target_core_xcopy.h"
+
+static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+	/*
+	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
+	 */
+	buf[5]	= 0x80;
+
+	/*
+	 * Set the TPGS field for explicit and/or implicit ALUA access type
+	 * and operation.
+	 *
+	 * See spc4r17 section 6.4.2 Table 135
+	 */
+	spin_lock(&lun->lun_tg_pt_gp_lock);
+	tg_pt_gp = lun->lun_tg_pt_gp;
+	if (tg_pt_gp)
+		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
+	spin_unlock(&lun->lun_tg_pt_gp_lock);
+}
+
+sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_lun *lun = cmd->se_lun;
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+
+	/* Set RMB (removable media) for tape devices */
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
+		buf[1] = 0x80;
+
+	buf[2] = 0x05; /* SPC-3 */
+
+	/*
+	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
+	 *
+	 * SPC4 says:
+	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
+	 *   standard INQUIRY data is in the format defined in this
+	 *   standard. Response data format values less than 2h are
+	 *   obsolete. Response data format values greater than 2h are
+	 *   reserved.
+	 */
+	buf[3] = 2;
+
+	/*
+	 * Enable SCCS and TPGS fields for Emulated ALUA
+	 */
+	spc_fill_alua_data(lun, buf);
+
+	/*
+	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
+	 */
+	if (dev->dev_attrib.emulate_3pc)
+		buf[5] |= 0x8;
+	/*
+	 * Set Protection (PROTECT) bit when DIF has been enabled on the
+	 * device, and the fabric supports VERIFY + PASS.  Also report
+	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
+	 * to unprotected devices.
+	 */
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
+			buf[5] |= 0x1;
+	}
+
+	buf[7] = 0x2; /* CmdQue=1 */
+
+	memcpy(&buf[8], "LIO-ORG ", 8);
+	memset(&buf[16], 0x20, 16);
+	memcpy(&buf[16], dev->t10_wwn.model,
+	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
+	memcpy(&buf[32], dev->t10_wwn.revision,
+	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
+	buf[4] = 31; /* Set additional length to 31 (36 bytes total, n - 4) */
+
+	return 0;
+}
+EXPORT_SYMBOL(spc_emulate_inquiry_std);
+
+/* unit serial number */
+static sense_reason_t
+spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+	u16 len;
+
+	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
+		len++; /* Extra Byte for NULL Terminator */
+		buf[3] = len;
+	}
+	return 0;
+}
+
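+/*
+ * Pack the hex digits of the unit serial into the nibbles of the NAA
+ * designator.  Illustrative example (not in the original source): a
+ * serial beginning "abc123" yields buf[0] |= 0xa, buf[1] = 0xbc,
+ * buf[2] = 0x12 and buf[3] = 0x30; non-hex characters are skipped.
+ */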
+void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
+				      unsigned char *buf)
+{
+	unsigned char *p = &dev->t10_wwn.unit_serial[0];
+	int cnt;
+	bool next = true;
+
+	/*
+	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT
+	 * SERIAL NUMBER set via vpd_unit_serial in target_core_configfs.c to
+	 * ensure per device uniqueness.
+	 */
+	for (cnt = 0; *p && cnt < 13; p++) {
+		int val = hex_to_bin(*p);
+
+		if (val < 0)
+			continue;
+
+		if (next) {
+			next = false;
+			buf[cnt++] |= val;
+		} else {
+			next = true;
+			buf[cnt] = val << 4;
+		}
+	}
+}
+
+/*
+ * Device identification VPD, for a complete list of
+ * DESIGNATOR TYPEs see spc4r17 Table 459.
+ */
+sense_reason_t
+spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_lun *lun = cmd->se_lun;
+	struct se_portal_group *tpg = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *prod = &dev->t10_wwn.model[0];
+	u32 prod_len;
+	u32 unit_serial_len, off = 0;
+	u16 len = 0, id_len;
+
+	off = 4;
+
+	/*
+	 * NAA IEEE Registered Extended Assigned designator format, see
+	 * spc4r17 section 7.7.3.6.5
+	 *
+	 * We depend upon a target_core_mod/ConfigFS provided
+	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
+	 * value in order to return the NAA id.
+	 */
+	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
+		goto check_t10_vend_desc;
+
+	/* CODE SET == Binary */
+	buf[off++] = 0x1;
+
+	/* Set ASSOCIATION == addressed logical unit: 00b */
+	buf[off] = 0x00;
+
+	/* Identifier/Designator type == NAA identifier */
+	buf[off++] |= 0x3;
+	off++;
+
+	/* Identifier/Designator length */
+	buf[off++] = 0x10;
+
+	/*
+	 * Start NAA IEEE Registered Extended Identifier/Designator
+	 */
+	buf[off++] = (0x6 << 4);
+
+	/*
+	 * Use OpenFabrics IEEE Company ID: 00 14 05
+	 */
+	buf[off++] = 0x01;
+	buf[off++] = 0x40;
+	buf[off] = (0x5 << 4);
+
+	/*
+	 * Return ConfigFS Unit Serial Number information for
+	 * VENDOR_SPECIFIC_IDENTIFIER and
+	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
+	 */
+	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+
+	len = 20;
+	off = (len + 4);
+
+check_t10_vend_desc:
+	/*
+	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
+	 */
+	id_len = 8; /* For Vendor field */
+	prod_len = 4; /* For VPD Header */
+	prod_len += 8; /* For Vendor field */
+	prod_len += strlen(prod);
+	prod_len++; /* For : */
+
+	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
+		unit_serial_len++; /* For NULL Terminator */
+
+		id_len += sprintf(&buf[off+12], "%s:%s", prod,
+				&dev->t10_wwn.unit_serial[0]);
+	}
+	buf[off] = 0x2; /* ASCII */
+	buf[off+1] = 0x1; /* T10 Vendor ID */
+	buf[off+2] = 0x0;
+	memcpy(&buf[off+4], "LIO-ORG", 8);
+	/* Extra Byte for NULL Terminator */
+	id_len++;
+	/* Identifier Length */
+	buf[off+3] = id_len;
+	/* Header size for Designation descriptor */
+	len += (id_len + 4);
+	off += (id_len + 4);
+
+	if (1) {
+		struct t10_alua_lu_gp *lu_gp;
+		u32 padding, scsi_name_len, scsi_target_len;
+		u16 lu_gp_id = 0;
+		u16 tg_pt_gp_id = 0;
+		u16 tpgt;
+
+		tpg = lun->lun_tpg;
+		/*
+		 * Relative target port identifier, see spc4r17
+		 * section 7.7.3.7
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+		buf[off] = tpg->proto_id << 4;
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Relative target port identifier */
+		buf[off++] |= 0x4;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		/* Skip over Obsolete field in RTPI payload
+		 * in Table 472 */
+		off += 2;
+		buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
+		buf[off++] = (lun->lun_rtpi & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Target port group identifier, see spc4r17
+		 * section 7.7.3.8
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+		spin_lock(&lun->lun_tg_pt_gp_lock);
+		tg_pt_gp = lun->lun_tg_pt_gp;
+		if (!tg_pt_gp) {
+			spin_unlock(&lun->lun_tg_pt_gp_lock);
+			goto check_lu_gp;
+		}
+		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+		spin_unlock(&lun->lun_tg_pt_gp_lock);
+
+		buf[off] = tpg->proto_id << 4;
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Target port group identifier */
+		buf[off++] |= 0x5;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Logical Unit Group identifier, see spc4r17
+		 * section 7.7.3.8
+		 */
+check_lu_gp:
+		lu_gp_mem = dev->dev_alua_lu_gp_mem;
+		if (!lu_gp_mem)
+			goto check_scsi_name;
+
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		lu_gp = lu_gp_mem->lu_gp;
+		if (!lu_gp) {
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+			goto check_scsi_name;
+		}
+		lu_gp_id = lu_gp->lu_gp_id;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		/* DESIGNATOR TYPE == Logical Unit Group identifier */
+		buf[off++] |= 0x6;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((lu_gp_id >> 8) & 0xff);
+		buf[off++] = (lu_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * SCSI name string designator, see spc4r17
+		 * section 7.7.3.11
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+check_scsi_name:
+		buf[off] = tpg->proto_id << 4;
+		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == SCSI name string */
+		buf[off++] |= 0x8;
+		off += 2; /* Skip over Reserved and length */
+		/*
+		 * SCSI name string identifier containing $FABRIC_MOD
+		 * dependent information.  For LIO-Target and iSCSI
+		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
+		 * UTF-8 encoding.
+		 */
+		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
+		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
+					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
+		scsi_name_len += 1 /* Include NULL terminator */;
+		/*
+		 * The null-terminated, null-padded (see 4.4.2) SCSI
+		 * NAME STRING field contains a UTF-8 format string.
+		 * The number of bytes in the SCSI NAME STRING field
+		 * (i.e., the value in the DESIGNATOR LENGTH field)
+		 * shall be no larger than 256 and shall be a multiple
+		 * of four.
+		 */
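+		/*
+		 * E.g. a 13-byte name string gets padding = (-13) & 3 = 3,
+		 * for a 16-byte, four-aligned DESIGNATOR LENGTH.
+		 */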
+		padding = ((-scsi_name_len) & 3);
+		if (padding)
+			scsi_name_len += padding;
+		if (scsi_name_len > 256)
+			scsi_name_len = 256;
+
+		buf[off-1] = scsi_name_len;
+		off += scsi_name_len;
+		/* Header size + Designation descriptor */
+		len += (scsi_name_len + 4);
+
+		/*
+		 * Target device designator
+		 */
+		buf[off] = tpg->proto_id << 4;
+		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target device: 10b */
+		buf[off] |= 0x20;
+		/* DESIGNATOR TYPE == SCSI name string */
+		buf[off++] |= 0x8;
+		off += 2; /* Skip over Reserved and length */
+		/*
+		 * SCSI name string identifier containing $FABRIC_MOD
+		 * dependent information.  For LIO-Target and iSCSI
+		 * Target Port, this means "<iSCSI name>" in
+		 * UTF-8 encoding.
+		 */
+		scsi_target_len = sprintf(&buf[off], "%s",
+					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+		scsi_target_len += 1 /* Include NULL terminator */;
+		/*
+		 * The null-terminated, null-padded (see 4.4.2) SCSI
+		 * NAME STRING field contains a UTF-8 format string.
+		 * The number of bytes in the SCSI NAME STRING field
+		 * (i.e., the value in the DESIGNATOR LENGTH field)
+		 * shall be no larger than 256 and shall be a multiple
+		 * of four.
+		 */
+		padding = ((-scsi_target_len) & 3);
+		if (padding)
+			scsi_target_len += padding;
+		if (scsi_target_len > 256)
+			scsi_target_len = 256;
+
+		buf[off-1] = scsi_target_len;
+		off += scsi_target_len;
+
+		/* Header size + Designation descriptor */
+		len += (scsi_target_len + 4);
+	}
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+	return 0;
+}
+EXPORT_SYMBOL(spc_emulate_evpd_83);
+
+/* Extended INQUIRY Data VPD Page */
+static sense_reason_t
+spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+
+	buf[3] = 0x3c;
+	/*
+	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
+	 * only for TYPE3 protection.
+	 */
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
+		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
+			buf[4] = 0x5;
+		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
+			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
+			buf[4] = 0x4;
+	}
+
+	/* logical unit supports type 1 and type 3 protection */
+	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
+	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
+	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
+		buf[4] |= (0x3 << 3);
+	}
+
+	/* Set HEADSUP, ORDSUP, SIMPSUP */
+	buf[5] = 0x07;
+
+	/* If WriteCache emulation is enabled, set V_SUP */
+	if (target_check_wce(dev))
+		buf[6] = 0x01;
+	/* If an LBA map is present set R_SUP */
+	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
+	if (!list_empty(&dev->t10_alua.lba_map_list))
+		buf[8] = 0x10;
+	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
+	return 0;
+}
+
+/* Block Limits VPD page */
+static sense_reason_t
+spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+	u32 mtl = 0;
+	int have_tp = 0, opt, min;
+
+	/*
+	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
+	 * emulate_tpu=1 or emulate_tpws=1 we expect a different page
+	 * length for Thin Provisioning.
+	 */
+	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
+		have_tp = 1;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = have_tp ? 0x3c : 0x10;
+
+	/* Set WSNZ to 1 */
+	buf[4] = 0x01;
+	/*
+	 * Set MAXIMUM COMPARE AND WRITE LENGTH
+	 */
+	if (dev->dev_attrib.emulate_caw)
+		buf[5] = 0x01;
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
+	 */
+	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
+		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
+	else
+		put_unaligned_be16(1, &buf[6]);
+
+	/*
+	 * Set MAXIMUM TRANSFER LENGTH
+	 *
+	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
+	 * enforcing maximum HW scatter-gather-list entry limit
+	 */
+	if (cmd->se_tfo->max_data_sg_nents) {
+		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
+		       dev->dev_attrib.block_size;
+	}
+	put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH
+	 */
+	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
+		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
+	else
+		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+
+	/*
+	 * Exit now if we don't support TP.
+	 */
+	if (!have_tp)
+		goto max_write_same;
+
+	/*
+	 * Set MAXIMUM UNMAP LBA COUNT
+	 */
+	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
+
+	/*
+	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
+	 */
+	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
+			   &buf[24]);
+
+	/*
+	 * Set OPTIMAL UNMAP GRANULARITY
+	 */
+	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
+
+	/*
+	 * UNMAP GRANULARITY ALIGNMENT
+	 */
+	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
+			   &buf[32]);
+	if (dev->dev_attrib.unmap_granularity_alignment != 0)
+		buf[32] |= 0x80; /* Set the UGAVALID bit */
+
+	/*
+	 * MAXIMUM WRITE SAME LENGTH
+	 */
+max_write_same:
+	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+
+	return 0;
+}
+
+/* Block Device Characteristics VPD page */
+static sense_reason_t
+spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = 0x3c;
+	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
+
+	return 0;
+}
+
+/* Thin Provisioning VPD */
+static sense_reason_t
+spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	/*
+	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
+	 *
+	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
+	 * zero, then the page length shall be set to 0004h.  If the DP bit
+	 * is set to one, then the page length shall be set to the value
+	 * defined in table 162.
+	 */
+	buf[0] = dev->transport->get_device_type(dev);
+
+	/*
+	 * Set Hardcoded length mentioned above for DP=0
+	 */
+	put_unaligned_be16(0x0004, &buf[2]);
+
+	/*
+	 * The THRESHOLD EXPONENT field indicates the threshold set size in
+	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
+	 * 2^(threshold exponent)).
+	 *
+	 * Note that this is currently set to 0x00 as mkp says it will be
+	 * changing again.  We can enable this once it has settled in T10
+	 * and is actually used by Linux/SCSI ML code.
+	 */
+	buf[4] = 0x00;
+
+	/*
+	 * A TPU bit set to one indicates that the device server supports
+	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
+	 * that the device server does not support the UNMAP command.
+	 */
+	if (dev->dev_attrib.emulate_tpu != 0)
+		buf[5] = 0x80;
+
+	/*
+	 * A TPWS bit set to one indicates that the device server supports
+	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
+	 * A TPWS bit set to zero indicates that the device server does not
+	 * support the use of the WRITE SAME (16) command to unmap LBAs.
+	 */
+	if (dev->dev_attrib.emulate_tpws != 0)
+		buf[5] |= 0x40 | 0x20;
+
+	return 0;
+}
+
+/* Referrals VPD page */
+static sense_reason_t
+spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = 0x0c;
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
+
+	return 0;
+}
+
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+
+static struct {
+	uint8_t		page;
+	sense_reason_t	(*emulate)(struct se_cmd *, unsigned char *);
+} evpd_handlers[] = {
+	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
+	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
+	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
+	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
+	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
+	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
+	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
+	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
+};
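+
+/*
+ * INQUIRY with EVPD=1 is dispatched through this table: the PAGE CODE
+ * in cdb[2] selects the matching .emulate handler, and page 0x00
+ * reports the table itself (see spc_emulate_evpd_00() below).
+ */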
+
+/* supported vital product data pages */
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+	int p;
+
+	/*
+	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
+	 * Registered Extended LUN WWN has been set via ConfigFS
+	 * during device creation/restart.
+	 */
+	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+		buf[3] = ARRAY_SIZE(evpd_handlers);
+		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
+			buf[p + 4] = evpd_handlers[p].page;
+	}
+
+	return 0;
+}
+
+static sense_reason_t
+spc_emulate_inquiry(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
+	unsigned char *rbuf;
+	unsigned char *cdb = cmd->t_task_cdb;
+	unsigned char *buf;
+	sense_reason_t ret;
+	int p;
+	int len = 0;
+
+	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate response buffer for INQUIRY\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
+		buf[0] = 0x3f; /* Not connected */
+	else
+		buf[0] = dev->transport->get_device_type(dev);
+
+	if (!(cdb[1] & 0x1)) {
+		if (cdb[2]) {
+			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
+			       cdb[2]);
+			ret = TCM_INVALID_CDB_FIELD;
+			goto out;
+		}
+
+		ret = spc_emulate_inquiry_std(cmd, buf);
+		len = buf[4] + 5;
+		goto out;
+	}
+
+	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
+		if (cdb[2] == evpd_handlers[p].page) {
+			buf[1] = cdb[2];
+			ret = evpd_handlers[p].emulate(cmd, buf);
+			len = get_unaligned_be16(&buf[2]) + 4;
+			goto out;
+		}
+	}
+
+	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+	ret = TCM_INVALID_CDB_FIELD;
+
+out:
+	rbuf = transport_kmap_data_sg(cmd);
+	if (rbuf) {
+		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
+		transport_kunmap_data_sg(cmd);
+	}
+	kfree(buf);
+
+	if (!ret)
+		target_complete_cmd_with_length(cmd, GOOD, len);
+	return ret;
+}
+
+static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
+{
+	p[0] = 0x01;
+	p[1] = 0x0a;
+
+	/* No changeable values for now */
+	if (pc == 1)
+		goto out;
+
+out:
+	return 12;
+}
+
+static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+
+	p[0] = 0x0a;
+	p[1] = 0x0a;
+
+	/* No changeable values for now */
+	if (pc == 1)
+		goto out;
+
+	/* GLTSD: No implicit save of log parameters */
+	p[2] = (1 << 1);
+	if (target_sense_desc_format(dev))
+		/* D_SENSE: Descriptor format sense data for 64bit sectors */
+		p[2] |= (1 << 2);
+
+	/*
+	 * From spc4r23, 7.4.7 Control mode page
+	 *
+	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
+	 * restrictions on the algorithm used for reordering commands
+	 * having the SIMPLE task attribute (see SAM-4).
+	 *
+	 *                    Table 368 -- QUEUE ALGORITHM MODIFIER field
+	 *                         Code      Description
+	 *                          0h       Restricted reordering
+	 *                          1h       Unrestricted reordering allowed
+	 *                          2h to 7h    Reserved
+	 *                          8h to Fh    Vendor specific
+	 *
+	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
+	 * the device server shall order the processing sequence of commands
+	 * having the SIMPLE task attribute such that data integrity is maintained
+	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
+	 * requests is halted at any time, the final value of all data observable
+	 * on the medium shall be the same as if all the commands had been processed
+	 * with the ORDERED task attribute).
+	 *
+	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
+	 * device server may reorder the processing sequence of commands having the
+	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
+	 * command sequence order shall be explicitly handled by the application client
+	 * through the selection of appropriate commands and task attributes.
+	 */
+	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
+	 *
+	 * 00b: The logical unit shall clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when a com-
+	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
+	 * status.
+	 *
+	 * 10b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when
+	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
+	 * CONFLICT status.
+	 *
+	 * 11b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall establish a unit attention condition for the
+	 * initiator port associated with the I_T nexus on which the BUSY,
+	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
+	 * Depending on the status, the additional sense code shall be set to
+	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
+	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
+	 * command, a unit attention condition shall be established only once
+	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status
+	 * regardless of the number of commands completed with one of those
+	 * status codes.
+	 */
+	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Task Aborted Status (TAS) bit set to zero.
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
+	/*
+	 * From spc4r30, section 7.5.7 Control mode page
+	 *
+	 * Application Tag Owner (ATO) bit set to one.
+	 *
+	 * If the ATO bit is set to one the device server shall not modify the
+	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
+	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
+	 * TAG field.
+	 */
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
+			p[5] |= 0x80;
+	}
+
+	p[8] = 0xff;
+	p[9] = 0xff;
+	p[11] = 30;
+
+out:
+	return 12;
+}
+
+static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	p[0] = 0x08;
+	p[1] = 0x12;
+
+	/* No changeable values for now */
+	if (pc == 1)
+		goto out;
+
+	if (target_check_wce(dev))
+		p[2] = 0x04; /* Write Cache Enable */
+	p[12] = 0x20; /* Disabled Read Ahead */
+
+out:
+	return 20;
+}
+
+static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
+{
+	p[0] = 0x1c;
+	p[1] = 0x0a;
+
+	/* No changeable values for now */
+	if (pc == 1)
+		goto out;
+
+out:
+	return 12;
+}
+
+static struct {
+	uint8_t		page;
+	uint8_t		subpage;
+	int		(*emulate)(struct se_cmd *, u8, unsigned char *);
+} modesense_handlers[] = {
+	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
+	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
+	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
+	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
+};
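+
+/*
+ * MODE SENSE/MODE SELECT dispatch table: page/subpage from the CDB (or,
+ * for MODE SELECT, from the parameter data) selects an emulate handler;
+ * page 0x3f ("return all pages") is special-cased in
+ * spc_emulate_modesense().
+ */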
+
+static void spc_modesense_write_protect(unsigned char *buf, int type)
+{
+	/*
+	 * I believe that the WP bit (bit 7) in the mode header is the same for
+	 * all device types.
+	 */
+	switch (type) {
+	case TYPE_DISK:
+	case TYPE_TAPE:
+	default:
+		buf[0] |= 0x80; /* WP bit */
+		break;
+	}
+}
+
+static void spc_modesense_dpofua(unsigned char *buf, int type)
+{
+	switch (type) {
+	case TYPE_DISK:
+		buf[0] |= 0x10; /* DPOFUA bit */
+		break;
+	default:
+		break;
+	}
+}
+
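+/*
+ * Emit the one-byte BLOCK DESCRIPTOR LENGTH field (8) followed by a
+ * short block descriptor: a 32-bit block count (clamped to 0xffffffff)
+ * and a 32-bit block size.  Returns the 9 bytes consumed.
+ */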
+static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+	*buf++ = 8;
+	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
+	buf += 4;
+	put_unaligned_be32(block_size, buf);
+	return 9;
+}
+
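+/*
+ * MODE_SENSE_10 with LLBA: for block counts that still fit in 32 bits,
+ * fall back to the short descriptor at the proper header offset;
+ * otherwise set LONGLBA in the header, a descriptor length of 16, and
+ * emit an 8-byte block count, 4 reserved bytes and a 32-bit block size.
+ */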
+static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+	if (blocks <= 0xffffffff)
+		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
+
+	*buf++ = 1;		/* LONGLBA */
+	buf += 2;
+	*buf++ = 16;
+	put_unaligned_be64(blocks, buf);
+	buf += 12;
+	put_unaligned_be32(block_size, buf);
+
+	return 20; /* 4 header bytes + 16-byte long block descriptor */
+}
+
+static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	char *cdb = cmd->t_task_cdb;
+	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
+	int type = dev->transport->get_device_type(dev);
+	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
+	bool dbd = !!(cdb[1] & 0x08);
+	bool llba = ten ? !!(cdb[1] & 0x10) : false;
+	u8 pc = cdb[2] >> 6;
+	u8 page = cdb[2] & 0x3f;
+	u8 subpage = cdb[3];
+	int length = 0;
+	int ret;
+	int i;
+	bool read_only = target_lun_is_rdonly(cmd);
+
+	memset(buf, 0, SE_MODE_PAGE_BUF);
+
+	/*
+	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
+	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
+	 */
+	length = ten ? 3 : 2;
+
+	/* DEVICE-SPECIFIC PARAMETER */
+	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
+		spc_modesense_write_protect(&buf[length], type);
+
+	/*
+	 * SBC only allows us to enable FUA and DPO together.  Fortunately
+	 * DPO is explicitly specified as a hint, so a noop is a perfectly
+	 * valid implementation.
+	 */
+	if (target_check_fua(dev))
+		spc_modesense_dpofua(&buf[length], type);
+
+	++length;
+
+	/* BLOCK DESCRIPTOR */
+
+	/*
+	 * For now we only include a block descriptor for disk (SBC)
+	 * devices; other command sets use a slightly different format.
+	 */
+	if (!dbd && type == TYPE_DISK) {
+		u64 blocks = dev->transport->get_blocks(dev);
+		u32 block_size = dev->dev_attrib.block_size;
+
+		if (ten) {
+			if (llba) {
+				length += spc_modesense_long_blockdesc(&buf[length],
+								       blocks, block_size);
+			} else {
+				length += 3;
+				length += spc_modesense_blockdesc(&buf[length],
+								  blocks, block_size);
+			}
+		} else {
+			length += spc_modesense_blockdesc(&buf[length], blocks,
+							  block_size);
+		}
+	} else {
+		if (ten)
+			length += 4;
+		else
+			length += 1;
+	}
+
+	if (page == 0x3f) {
+		if (subpage != 0x00 && subpage != 0xff) {
+			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
+			return TCM_INVALID_CDB_FIELD;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
+			/*
+			 * Tricky way to say all subpage 00h for
+			 * subpage==0, all subpages for subpage==0xff
+			 * (and we just checked above that those are
+			 * the only two possibilities).
+			 */
+			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
+				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
+				if (!ten && length + ret >= 255)
+					break;
+				length += ret;
+			}
+		}
+
+		goto set_length;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+		if (modesense_handlers[i].page == page &&
+		    modesense_handlers[i].subpage == subpage) {
+			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
+			goto set_length;
+		}
+
+	/*
+	 * We don't intend to implement:
+	 *  - obsolete page 03h "format parameters" (checked by Solaris)
+	 */
+	if (page != 0x03)
+		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+		       page, subpage);
+
+	return TCM_UNKNOWN_MODE_PAGE;
+
+set_length:
+	if (ten)
+		put_unaligned_be16(length - 2, buf);
+	else
+		buf[0] = length - 1;
+
+	rbuf = transport_kmap_data_sg(cmd);
+	if (rbuf) {
+		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
+		transport_kunmap_data_sg(cmd);
+	}
+
+	target_complete_cmd_with_length(cmd, GOOD, length);
+	return 0;
+}
+
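+/*
+ * MODE SELECT emulation: rather than applying changes, regenerate the
+ * current (default) page contents and require the initiator's parameter
+ * data to match byte-for-byte; any difference is rejected as an invalid
+ * parameter list.
+ */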
+static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
+{
+	char *cdb = cmd->t_task_cdb;
+	bool ten = cdb[0] == MODE_SELECT_10;
+	int off = ten ? 8 : 4;
+	bool pf = !!(cdb[1] & 0x10);
+	u8 page, subpage;
+	unsigned char *buf;
+	unsigned char tbuf[SE_MODE_PAGE_BUF];
+	int length;
+	sense_reason_t ret = 0;
+	int i;
+
+	if (!cmd->data_length) {
+		target_complete_cmd(cmd, GOOD);
+		return 0;
+	}
+
+	if (cmd->data_length < off + 2)
+		return TCM_PARAMETER_LIST_LENGTH_ERROR;
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	if (!pf) {
+		ret = TCM_INVALID_CDB_FIELD;
+		goto out;
+	}
+
+	page = buf[off] & 0x3f;
+	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
+
+	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+		if (modesense_handlers[i].page == page &&
+		    modesense_handlers[i].subpage == subpage) {
+			memset(tbuf, 0, SE_MODE_PAGE_BUF);
+			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
+			goto check_contents;
+		}
+
+	ret = TCM_UNKNOWN_MODE_PAGE;
+	goto out;
+
+check_contents:
+	if (cmd->data_length < off + length) {
+		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+		goto out;
+	}
+
+	if (memcmp(buf + off, tbuf, length))
+		ret = TCM_INVALID_PARAMETER_LIST;
+
+out:
+	transport_kunmap_data_sg(cmd);
+
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
+	return ret;
+}
+
+static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
+{
+	unsigned char *cdb = cmd->t_task_cdb;
+	unsigned char *rbuf;
+	u8 ua_asc = 0, ua_ascq = 0;
+	unsigned char buf[SE_SENSE_BUF];
+	bool desc_format = target_sense_desc_format(cmd->se_dev);
+
+	memset(buf, 0, SE_SENSE_BUF);
+
+	if (cdb[1] & 0x01) {
+		pr_err("REQUEST_SENSE description emulation not"
+			" supported\n");
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	rbuf = transport_kmap_data_sg(cmd);
+	if (!rbuf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
+		scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
+					ua_asc, ua_ascq);
+	else
+		scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);
+
+	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+	transport_kunmap_data_sg(cmd);
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
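+/*
+ * REPORT LUNS: an 8-byte header (4-byte LUN LIST LENGTH plus 4 reserved
+ * bytes) followed by one 8-byte entry per mapped LUN.  The full list
+ * length is reported even when the allocation length truncates the
+ * entries actually copied out.
+ */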
+sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct scsi_lun slun;
+	unsigned char *buf;
+	u32 lun_count = 0, offset = 8;
+	__be32 len;
+
+	buf = transport_kmap_data_sg(cmd);
+	if (cmd->data_length && !buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	/*
+	 * If no struct se_session pointer is present, this struct se_cmd is
+	 * coming via a target_core_mod PASSTHROUGH op, and not through
+	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
+	 */
+	if (!sess)
+		goto done;
+
+	nacl = sess->se_node_acl;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+		/*
+		 * We determine the correct LUN LIST LENGTH even once we
+		 * have reached the initial allocation length.
+		 * See SPC2-R20 7.19.
+		 */
+		lun_count++;
+		if (offset >= cmd->data_length)
+			continue;
+
+		int_to_scsilun(deve->mapped_lun, &slun);
+		memcpy(buf + offset, &slun,
+		       min(8u, cmd->data_length - offset));
+		offset += 8;
+	}
+	rcu_read_unlock();
+
+	/*
+	 * See SPC3 r07, page 159.
+	 */
+done:
+	/*
+	 * If no LUNs are accessible, report virtual LUN 0.
+	 */
+	if (lun_count == 0) {
+		int_to_scsilun(0, &slun);
+		if (cmd->data_length > 8)
+			memcpy(buf + offset, &slun,
+			       min(8u, cmd->data_length - offset));
+		lun_count = 1;
+	}
+
+	if (buf) {
+		len = cpu_to_be32(lun_count * 8);
+		memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+		transport_kunmap_data_sg(cmd);
+	}
+
+	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
+	return 0;
+}
+EXPORT_SYMBOL(spc_emulate_report_luns);
+
+static sense_reason_t
+spc_emulate_testunitready(struct se_cmd *cmd)
+{
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+sense_reason_t
+spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *cdb = cmd->t_task_cdb;
+
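+	/*
+	 * Decode the ALLOCATION LENGTH / parameter list length for each
+	 * opcode into *size and, where TCM emulates the command, point
+	 * cmd->execute_cmd at the emulation handler.
+	 */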
+	switch (cdb[0]) {
+	case MODE_SELECT:
+		*size = cdb[4];
+		cmd->execute_cmd = spc_emulate_modeselect;
+		break;
+	case MODE_SELECT_10:
+		*size = (cdb[7] << 8) + cdb[8];
+		cmd->execute_cmd = spc_emulate_modeselect;
+		break;
+	case MODE_SENSE:
+		*size = cdb[4];
+		cmd->execute_cmd = spc_emulate_modesense;
+		break;
+	case MODE_SENSE_10:
+		*size = (cdb[7] << 8) + cdb[8];
+		cmd->execute_cmd = spc_emulate_modesense;
+		break;
+	case LOG_SELECT:
+	case LOG_SENSE:
+		*size = (cdb[7] << 8) + cdb[8];
+		break;
+	case PERSISTENT_RESERVE_IN:
+		*size = (cdb[7] << 8) + cdb[8];
+		cmd->execute_cmd = target_scsi3_emulate_pr_in;
+		break;
+	case PERSISTENT_RESERVE_OUT:
+		*size = (cdb[7] << 8) + cdb[8];
+		cmd->execute_cmd = target_scsi3_emulate_pr_out;
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		if (cdb[0] == RELEASE_10)
+			*size = (cdb[7] << 8) | cdb[8];
+		else
+			*size = cmd->data_length;
+
+		cmd->execute_cmd = target_scsi2_reservation_release;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/*
+		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		 */
+		if (cdb[0] == RESERVE_10)
+			*size = (cdb[7] << 8) | cdb[8];
+		else
+			*size = cmd->data_length;
+
+		cmd->execute_cmd = target_scsi2_reservation_reserve;
+		break;
+	case REQUEST_SENSE:
+		*size = cdb[4];
+		cmd->execute_cmd = spc_emulate_request_sense;
+		break;
+	case INQUIRY:
+		*size = (cdb[3] << 8) + cdb[4];
+
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+		 * See spc4r17 section 5.3
+		 */
+		cmd->sam_task_attr = TCM_HEAD_TAG;
+		cmd->execute_cmd = spc_emulate_inquiry;
+		break;
+	case SECURITY_PROTOCOL_IN:
+	case SECURITY_PROTOCOL_OUT:
+		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		break;
+	case EXTENDED_COPY:
+		*size = get_unaligned_be32(&cdb[10]);
+		cmd->execute_cmd = target_do_xcopy;
+		break;
+	case RECEIVE_COPY_RESULTS:
+		*size = get_unaligned_be32(&cdb[10]);
+		cmd->execute_cmd = target_do_receive_copy_results;
+		break;
+	case READ_ATTRIBUTE:
+	case WRITE_ATTRIBUTE:
+		*size = (cdb[10] << 24) | (cdb[11] << 16) |
+		       (cdb[12] << 8) | cdb[13];
+		break;
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+		*size = (cdb[3] << 8) | cdb[4];
+		break;
+	case WRITE_BUFFER:
+		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		break;
+	case REPORT_LUNS:
+		cmd->execute_cmd = spc_emulate_report_luns;
+		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
+		 * See spc4r17 section 5.3
+		 */
+		cmd->sam_task_attr = TCM_HEAD_TAG;
+		break;
+	case TEST_UNIT_READY:
+		cmd->execute_cmd = spc_emulate_testunitready;
+		*size = 0;
+		break;
+	case MAINTENANCE_IN:
+		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+			/*
+			 * MAINTENANCE_IN from SCC-2
+			 * Check for emulated MI_REPORT_TARGET_PGS
+			 */
+			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
+				cmd->execute_cmd =
+					target_emulate_report_target_port_groups;
+			}
+			*size = get_unaligned_be32(&cdb[6]);
+		} else {
+			/*
+			 * GPCMD_SEND_KEY from multi media commands
+			 */
+			*size = get_unaligned_be16(&cdb[8]);
+		}
+		break;
+	case MAINTENANCE_OUT:
+		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+			/*
+			 * MAINTENANCE_OUT from SCC-2
+			 * Check for emulated MO_SET_TARGET_PGS.
+			 */
+			if (cdb[1] == MO_SET_TARGET_PGS) {
+				cmd->execute_cmd =
+					target_emulate_set_target_port_groups;
+			}
+			*size = get_unaligned_be32(&cdb[6]);
+		} else {
+			/*
+			 * GPCMD_SEND_KEY from multi media commands
+			 */
+			*size = get_unaligned_be16(&cdb[8]);
+		}
+		break;
+	default:
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(spc_parse_cdb);
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
new file mode 100644
index 0000000..81a6b3e
--- /dev/null
+++ b/drivers/target/target_core_stat.c
@@ -0,0 +1,1364 @@
+/*******************************************************************************
+ * Filename:  target_core_stat.c
+ *
+ * Modern ConfigFS group context specific statistics based on original
+ * target_core_mib.c code
+ *
+ * (c) Copyright 2006-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/utsname.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+#define NONE		"None"
+#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
+
+#define SCSI_LU_INDEX			1
+#define LU_COUNT			1
+
+/*
+ * SCSI Device Table
+ */
+
+static struct se_device *to_stat_dev(struct config_item *item)
+{
+	struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
+			struct se_dev_stat_grps, scsi_dev_group);
+	return container_of(sgrps, struct se_device, dev_stat_grps);
+}
+
+static ssize_t target_stat_inst_show(struct config_item *item, char *page)
+{
+	struct se_hba *hba = to_stat_dev(item)->se_hba;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+
+static ssize_t target_stat_indx_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->dev_index);
+}
+
+static ssize_t target_stat_role_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "Target\n");
+}
+
+static ssize_t target_stat_ports_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->export_count);
+}
+
+CONFIGFS_ATTR_RO(target_stat_, inst);
+CONFIGFS_ATTR_RO(target_stat_, indx);
+CONFIGFS_ATTR_RO(target_stat_, role);
+CONFIGFS_ATTR_RO(target_stat_, ports);
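+
+/*
+ * CONFIGFS_ATTR_RO(_pfx, _name) wires _pfx##_name##_show() up to a
+ * read-only configfs attribute named _pfx##attr_##_name, collected in
+ * the attribute arrays below.
+ */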
+
+static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
+	&target_stat_attr_inst,
+	&target_stat_attr_indx,
+	&target_stat_attr_role,
+	&target_stat_attr_ports,
+	NULL,
+};
+
+static struct config_item_type target_stat_scsi_dev_cit = {
+	.ct_attrs		= target_stat_scsi_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Target Device Table
+ */
+static struct se_device *to_stat_tgt_dev(struct config_item *item)
+{
+	struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
+			struct se_dev_stat_grps, scsi_tgt_dev_group);
+	return container_of(sgrps, struct se_device, dev_stat_grps);
+}
+
+static ssize_t target_stat_tgt_inst_show(struct config_item *item, char *page)
+{
+	struct se_hba *hba = to_stat_tgt_dev(item)->se_hba;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+
+static ssize_t target_stat_tgt_indx_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", to_stat_tgt_dev(item)->dev_index);
+}
+
+static ssize_t target_stat_tgt_num_lus_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
+}
+
+static ssize_t target_stat_tgt_status_show(struct config_item *item,
+		char *page)
+{
+	if (to_stat_tgt_dev(item)->export_count)
+		return snprintf(page, PAGE_SIZE, "activated");
+	else
+		return snprintf(page, PAGE_SIZE, "deactivated");
+}
+
+static ssize_t target_stat_tgt_non_access_lus_show(struct config_item *item,
+		char *page)
+{
+	int non_accessible_lus;
+
+	if (to_stat_tgt_dev(item)->export_count)
+		non_accessible_lus = 0;
+	else
+		non_accessible_lus = 1;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
+}
+
+static ssize_t target_stat_tgt_resets_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%lu\n",
+			atomic_long_read(&to_stat_tgt_dev(item)->num_resets));
+}
+
+CONFIGFS_ATTR_RO(target_stat_tgt_, inst);
+CONFIGFS_ATTR_RO(target_stat_tgt_, indx);
+CONFIGFS_ATTR_RO(target_stat_tgt_, num_lus);
+CONFIGFS_ATTR_RO(target_stat_tgt_, status);
+CONFIGFS_ATTR_RO(target_stat_tgt_, non_access_lus);
+CONFIGFS_ATTR_RO(target_stat_tgt_, resets);
+
+static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
+	&target_stat_tgt_attr_inst,
+	&target_stat_tgt_attr_indx,
+	&target_stat_tgt_attr_num_lus,
+	&target_stat_tgt_attr_status,
+	&target_stat_tgt_attr_non_access_lus,
+	&target_stat_tgt_attr_resets,
+	NULL,
+};
+
+static struct config_item_type target_stat_scsi_tgt_dev_cit = {
+	.ct_attrs		= target_stat_scsi_tgt_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Logical Unit Table
+ */
+
+static struct se_device *to_stat_lu_dev(struct config_item *item)
+{
+	struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
+			struct se_dev_stat_grps, scsi_lu_group);
+	return container_of(sgrps, struct se_device, dev_stat_grps);
+}
+
+static ssize_t target_stat_lu_inst_show(struct config_item *item, char *page)
+{
+	struct se_hba *hba = to_stat_lu_dev(item)->se_hba;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+
+static ssize_t target_stat_lu_dev_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			to_stat_lu_dev(item)->dev_index);
+}
+
+static ssize_t target_stat_lu_indx_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
+}
+
+static ssize_t target_stat_lu_lun_show(struct config_item *item, char *page)
+{
+	/* FIXME: scsiLuDefaultLun */
+	return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
+}
+
+static ssize_t target_stat_lu_lu_name_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+
+	/* scsiLuWwnName */
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			(strlen(dev->t10_wwn.unit_serial)) ?
+			dev->t10_wwn.unit_serial : "None");
+}
+
+static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+	int i;
+	char str[sizeof(dev->t10_wwn.vendor)+1];
+
+	/* scsiLuVendorId */
+	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+		str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
+			dev->t10_wwn.vendor[i] : ' ';
+	str[i] = '\0';
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+
+static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+	int i;
+	char str[sizeof(dev->t10_wwn.model)+1];
+
+	/* scsiLuProductId */
+	for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
+		str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
+			dev->t10_wwn.model[i] : ' ';
+	str[i] = '\0';
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+
+static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+	int i;
+	char str[sizeof(dev->t10_wwn.revision)+1];
+
+	/* scsiLuRevisionId */
+	for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
+		str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
+			dev->t10_wwn.revision[i] : ' ';
+	str[i] = '\0';
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+
+static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+
+	/* scsiLuPeripheralType */
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			dev->transport->get_device_type(dev));
+}
+
+static ssize_t target_stat_lu_status_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+
+	/* scsiLuStatus */
+	return snprintf(page, PAGE_SIZE, "%s\n",
+		(dev->export_count) ? "available" : "notavailable");
+}
+
+static ssize_t target_stat_lu_state_bit_show(struct config_item *item,
+		char *page)
+{
+	/* scsiLuState */
+	return snprintf(page, PAGE_SIZE, "exposed\n");
+}
+
+static ssize_t target_stat_lu_num_cmds_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+
+	/* scsiLuNumCommands */
+	return snprintf(page, PAGE_SIZE, "%lu\n",
+			atomic_long_read(&dev->num_cmds));
+}
+
+static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+
+	/* scsiLuReadMegaBytes */
+	return snprintf(page, PAGE_SIZE, "%lu\n",
+			atomic_long_read(&dev->read_bytes) >> 20);
+}
+
+static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+
+	/* scsiLuWrittenMegaBytes */
+	return snprintf(page, PAGE_SIZE, "%lu\n",
+			atomic_long_read(&dev->write_bytes) >> 20);
+}
+
+static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+
+	/* scsiLuInResets */
+	return snprintf(page, PAGE_SIZE, "%lu\n",
+		atomic_long_read(&dev->num_resets));
+}
+
+static ssize_t target_stat_lu_full_stat_show(struct config_item *item,
+		char *page)
+{
+	/* FIXME: scsiLuOutTaskSetFullStatus */
+	return snprintf(page, PAGE_SIZE, "%u\n", 0);
+}
+
+static ssize_t target_stat_lu_hs_num_cmds_show(struct config_item *item,
+		char *page)
+{
+	/* FIXME: scsiLuHSInCommands */
+	return snprintf(page, PAGE_SIZE, "%u\n", 0);
+}
+
+static ssize_t target_stat_lu_creation_time_show(struct config_item *item,
+		char *page)
+{
+	struct se_device *dev = to_stat_lu_dev(item);
+
+	/* scsiLuCreationTime */
+	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
+				INITIAL_JIFFIES) * 100 / HZ));
+}
+
+CONFIGFS_ATTR_RO(target_stat_lu_, inst);
+CONFIGFS_ATTR_RO(target_stat_lu_, dev);
+CONFIGFS_ATTR_RO(target_stat_lu_, indx);
+CONFIGFS_ATTR_RO(target_stat_lu_, lun);
+CONFIGFS_ATTR_RO(target_stat_lu_, lu_name);
+CONFIGFS_ATTR_RO(target_stat_lu_, vend);
+CONFIGFS_ATTR_RO(target_stat_lu_, prod);
+CONFIGFS_ATTR_RO(target_stat_lu_, rev);
+CONFIGFS_ATTR_RO(target_stat_lu_, dev_type);
+CONFIGFS_ATTR_RO(target_stat_lu_, status);
+CONFIGFS_ATTR_RO(target_stat_lu_, state_bit);
+CONFIGFS_ATTR_RO(target_stat_lu_, num_cmds);
+CONFIGFS_ATTR_RO(target_stat_lu_, read_mbytes);
+CONFIGFS_ATTR_RO(target_stat_lu_, write_mbytes);
+CONFIGFS_ATTR_RO(target_stat_lu_, resets);
+CONFIGFS_ATTR_RO(target_stat_lu_, full_stat);
+CONFIGFS_ATTR_RO(target_stat_lu_, hs_num_cmds);
+CONFIGFS_ATTR_RO(target_stat_lu_, creation_time);
+
+static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
+	&target_stat_lu_attr_inst,
+	&target_stat_lu_attr_dev,
+	&target_stat_lu_attr_indx,
+	&target_stat_lu_attr_lun,
+	&target_stat_lu_attr_lu_name,
+	&target_stat_lu_attr_vend,
+	&target_stat_lu_attr_prod,
+	&target_stat_lu_attr_rev,
+	&target_stat_lu_attr_dev_type,
+	&target_stat_lu_attr_status,
+	&target_stat_lu_attr_state_bit,
+	&target_stat_lu_attr_num_cmds,
+	&target_stat_lu_attr_read_mbytes,
+	&target_stat_lu_attr_write_mbytes,
+	&target_stat_lu_attr_resets,
+	&target_stat_lu_attr_full_stat,
+	&target_stat_lu_attr_hs_num_cmds,
+	&target_stat_lu_attr_creation_time,
+	NULL,
+};
+
+static struct config_item_type target_stat_scsi_lu_cit = {
+	.ct_attrs		= target_stat_scsi_lu_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_configfs.c:target_core_make_subdev() to setup
+ * the target statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_dev_default_groups(struct se_device *dev)
+{
+	struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
+
+	config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
+			"scsi_dev", &target_stat_scsi_dev_cit);
+	config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
+			"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
+	config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
+			"scsi_lu", &target_stat_scsi_lu_cit);
+
+	dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
+	dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
+	dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
+	dev_stat_grp->default_groups[3] = NULL;
+}
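+
+/*
+ * The resulting per-device configfs layout is, illustratively,
+ * $HBA/$DEV/statistics/{scsi_dev,scsi_tgt_dev,scsi_lu} under
+ * /sys/kernel/config/target/core/.
+ */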
+
+/*
+ * SCSI Port Table
+ */
+
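+/*
+ * Each show handler below takes rcu_read_lock() and re-checks
+ * lun->lun_se_dev, returning -ENODEV if the LUN was unlinked from its
+ * backing device while the stat file was open.
+ */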
+static struct se_lun *to_stat_port(struct config_item *item)
+{
+	struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
+			struct se_port_stat_grps, scsi_port_group);
+	return container_of(pgrps, struct se_lun, port_stat_grps);
+}
+
+static ssize_t target_stat_port_inst_show(struct config_item *item, char *page)
+{
+	struct se_lun *lun = to_stat_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_port_dev_show(struct config_item *item, char *page)
+{
+	struct se_lun *lun = to_stat_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_port_indx_show(struct config_item *item, char *page)
+{
+	struct se_lun *lun = to_stat_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_port_role_show(struct config_item *item, char *page)
+{
+	struct se_lun *lun = to_stat_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_port_busy_count_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev) {
+		/* FIXME: scsiPortBusyStatuses  */
+		ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(target_stat_port_, inst);
+CONFIGFS_ATTR_RO(target_stat_port_, dev);
+CONFIGFS_ATTR_RO(target_stat_port_, indx);
+CONFIGFS_ATTR_RO(target_stat_port_, role);
+CONFIGFS_ATTR_RO(target_stat_port_, busy_count);
+
+static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
+	&target_stat_port_attr_inst,
+	&target_stat_port_attr_dev,
+	&target_stat_port_attr_indx,
+	&target_stat_port_attr_role,
+	&target_stat_port_attr_busy_count,
+	NULL,
+};
+
+static struct config_item_type target_stat_scsi_port_cit = {
+	.ct_attrs		= target_stat_scsi_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Target Port Table
+ */
+static struct se_lun *to_stat_tgt_port(struct config_item *item)
+{
+	struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
+			struct se_port_stat_grps, scsi_tgt_port_group);
+	return container_of(pgrps, struct se_lun, port_stat_grps);
+}
+
+static ssize_t target_stat_tgt_port_inst_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_tgt_port_dev_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_tgt_port_indx_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_tgt_port_name_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_portal_group *tpg = lun->lun_tpg;
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
+			tpg->se_tpg_tfo->get_fabric_name(),
+			lun->lun_rtpi);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_portal_group *tpg = lun->lun_tpg;
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_tgt_port_in_cmds_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%lu\n",
+			       atomic_long_read(&lun->lun_stats.cmd_pdus));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_tgt_port_write_mbytes_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n",
+			(u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_tgt_port_read_mbytes_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n",
+				(u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_stat_tgt_port(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev) {
+		/* FIXME: scsiTgtPortHsInCommands */
+		ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, inst);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, dev);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, indx);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, name);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, port_index);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, in_cmds);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, write_mbytes);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, read_mbytes);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, hs_in_cmds);
+
+static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
+	&target_stat_tgt_port_attr_inst,
+	&target_stat_tgt_port_attr_dev,
+	&target_stat_tgt_port_attr_indx,
+	&target_stat_tgt_port_attr_name,
+	&target_stat_tgt_port_attr_port_index,
+	&target_stat_tgt_port_attr_in_cmds,
+	&target_stat_tgt_port_attr_write_mbytes,
+	&target_stat_tgt_port_attr_read_mbytes,
+	&target_stat_tgt_port_attr_hs_in_cmds,
+	NULL,
+};
+
+static struct config_item_type target_stat_scsi_tgt_port_cit = {
+	.ct_attrs		= target_stat_scsi_tgt_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Transport Table
+ */
+static struct se_lun *to_transport_stat(struct config_item *item)
+{
+	struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
+			struct se_port_stat_grps, scsi_transport_group);
+	return container_of(pgrps, struct se_lun, port_stat_grps);
+}
+
+static ssize_t target_stat_transport_inst_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_transport_stat(item);
+	struct se_device *dev;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_transport_device_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_transport_stat(item);
+	struct se_device *dev;
+	struct se_portal_group *tpg = lun->lun_tpg;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev) {
+		/* scsiTransportType */
+		ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
+			       tpg->se_tpg_tfo->get_fabric_name());
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_transport_indx_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_transport_stat(item);
+	struct se_device *dev;
+	struct se_portal_group *tpg = lun->lun_tpg;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev)
+		ret = snprintf(page, PAGE_SIZE, "%u\n",
+			       tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_transport_dev_name_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun *lun = to_transport_stat(item);
+	struct se_device *dev;
+	struct se_portal_group *tpg = lun->lun_tpg;
+	struct t10_wwn *wwn;
+	ssize_t ret = -ENODEV;
+
+	rcu_read_lock();
+	dev = rcu_dereference(lun->lun_se_dev);
+	if (dev) {
+		wwn = &dev->t10_wwn;
+		/* scsiTransportDevName */
+		ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
+				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+				(strlen(wwn->unit_serial)) ? wwn->unit_serial :
+				wwn->vendor);
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(target_stat_transport_, inst);
+CONFIGFS_ATTR_RO(target_stat_transport_, device);
+CONFIGFS_ATTR_RO(target_stat_transport_, indx);
+CONFIGFS_ATTR_RO(target_stat_transport_, dev_name);
+
+static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
+	&target_stat_transport_attr_inst,
+	&target_stat_transport_attr_device,
+	&target_stat_transport_attr_indx,
+	&target_stat_transport_attr_dev_name,
+	NULL,
+};
+
+static struct config_item_type target_stat_scsi_transport_cit = {
+	.ct_attrs		= target_stat_scsi_transport_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup
+ * the target port statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_port_default_groups(struct se_lun *lun)
+{
+	struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
+
+	config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
+			"scsi_port", &target_stat_scsi_port_cit);
+	config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
+			"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
+	config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
+			"scsi_transport", &target_stat_scsi_transport_cit);
+
+	port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
+	port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
+	port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
+	port_stat_grp->default_groups[3] = NULL;
+}
+
+/*
+ * SCSI Authorized Initiator Table
+ */
+
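+/* Map a configfs item in the scsi_auth_intr stat group back to its se_lun_acl. */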
+static struct se_lun_acl *auth_to_lacl(struct config_item *item)
+{
+	struct se_ml_stat_grps *lgrps = container_of(to_config_group(item),
+			struct se_ml_stat_grps, scsi_auth_intr_group);
+	return container_of(lgrps, struct se_lun_acl, ml_stat_grps);
+}
+
+static ssize_t target_stat_auth_inst_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiInstIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_dev_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	lun = rcu_dereference(deve->se_lun);
+	/* scsiDeviceIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_port_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiAuthIntrTgtPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_indx_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAuthIntrIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_dev_or_port_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAuthIntrDevOrPort */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_intr_name_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAuthIntrName */
+	ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_map_indx_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrLunMapIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_att_count_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAuthIntrAttachedTimes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAuthIntrOutCommands */
+	ret = snprintf(page, PAGE_SIZE, "%lu\n",
+		       atomic_long_read(&deve->total_cmds));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAuthIntrReadMegaBytes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+		      (u32)(atomic_long_read(&deve->read_bytes) >> 20));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAuthIntrWrittenMegaBytes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+		      (u32)(atomic_long_read(&deve->write_bytes) >> 20));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrHSOutCommands */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_creation_time_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAuthIntrLastCreation */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
+				INITIAL_JIFFIES) * 100 / HZ));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_auth_row_status_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = auth_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrRowStatus */
+	ret = snprintf(page, PAGE_SIZE, "Ready\n");
+	rcu_read_unlock();
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(target_stat_auth_, inst);
+CONFIGFS_ATTR_RO(target_stat_auth_, dev);
+CONFIGFS_ATTR_RO(target_stat_auth_, port);
+CONFIGFS_ATTR_RO(target_stat_auth_, indx);
+CONFIGFS_ATTR_RO(target_stat_auth_, dev_or_port);
+CONFIGFS_ATTR_RO(target_stat_auth_, intr_name);
+CONFIGFS_ATTR_RO(target_stat_auth_, map_indx);
+CONFIGFS_ATTR_RO(target_stat_auth_, att_count);
+CONFIGFS_ATTR_RO(target_stat_auth_, num_cmds);
+CONFIGFS_ATTR_RO(target_stat_auth_, read_mbytes);
+CONFIGFS_ATTR_RO(target_stat_auth_, write_mbytes);
+CONFIGFS_ATTR_RO(target_stat_auth_, hs_num_cmds);
+CONFIGFS_ATTR_RO(target_stat_auth_, creation_time);
+CONFIGFS_ATTR_RO(target_stat_auth_, row_status);
+
+static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
+	&target_stat_auth_attr_inst,
+	&target_stat_auth_attr_dev,
+	&target_stat_auth_attr_port,
+	&target_stat_auth_attr_indx,
+	&target_stat_auth_attr_dev_or_port,
+	&target_stat_auth_attr_intr_name,
+	&target_stat_auth_attr_map_indx,
+	&target_stat_auth_attr_att_count,
+	&target_stat_auth_attr_num_cmds,
+	&target_stat_auth_attr_read_mbytes,
+	&target_stat_auth_attr_write_mbytes,
+	&target_stat_auth_attr_hs_num_cmds,
+	&target_stat_auth_attr_creation_time,
+	&target_stat_auth_attr_row_status,
+	NULL,
+};
+
+static struct config_item_type target_stat_scsi_auth_intr_cit = {
+	.ct_attrs		= target_stat_scsi_auth_intr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Attached Initiator Port Table
+ */
+
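+/* Map a configfs item in the scsi_att_intr_port stat group back to its se_lun_acl. */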
+static struct se_lun_acl *iport_to_lacl(struct config_item *item)
+{
+	struct se_ml_stat_grps *lgrps = container_of(to_config_group(item),
+			struct se_ml_stat_grps, scsi_att_intr_port_group);
+	return container_of(lgrps, struct se_lun_acl, ml_stat_grps);
+}
+
+static ssize_t target_stat_iport_inst_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = iport_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiInstIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_iport_dev_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = iport_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	lun = rcu_dereference(deve->se_lun);
+	/* scsiDeviceIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_iport_port_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = iport_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_iport_indx_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = iport_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_session *se_sess;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->nacl_sess_lock);
+	se_sess = nacl->nacl_sess;
+	if (!se_sess) {
+		spin_unlock_irq(&nacl->nacl_sess_lock);
+		return -ENODEV;
+	}
+
+	tpg = nacl->se_tpg;
+	/* scsiAttIntrPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			tpg->se_tpg_tfo->sess_get_index(se_sess));
+	spin_unlock_irq(&nacl->nacl_sess_lock);
+	return ret;
+}
+
+static ssize_t target_stat_iport_port_auth_indx_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = iport_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	/* scsiAttIntrPortAuthIntrIdx */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t target_stat_iport_port_ident_show(struct config_item *item,
+		char *page)
+{
+	struct se_lun_acl *lacl = iport_to_lacl(item);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_session *se_sess;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+	unsigned char buf[64];
+
+	spin_lock_irq(&nacl->nacl_sess_lock);
+	se_sess = nacl->nacl_sess;
+	if (!se_sess) {
+		spin_unlock_irq(&nacl->nacl_sess_lock);
+		return -ENODEV;
+	}
+
+	tpg = nacl->se_tpg;
+	/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
+	memset(buf, 0, 64);
+	if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
+		tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
+
+	ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
+	spin_unlock_irq(&nacl->nacl_sess_lock);
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(target_stat_iport_, inst);
+CONFIGFS_ATTR_RO(target_stat_iport_, dev);
+CONFIGFS_ATTR_RO(target_stat_iport_, port);
+CONFIGFS_ATTR_RO(target_stat_iport_, indx);
+CONFIGFS_ATTR_RO(target_stat_iport_, port_auth_indx);
+CONFIGFS_ATTR_RO(target_stat_iport_, port_ident);
+
+static struct configfs_attribute *target_stat_scsi_att_intr_port_attrs[] = {
+	&target_stat_iport_attr_inst,
+	&target_stat_iport_attr_dev,
+	&target_stat_iport_attr_port,
+	&target_stat_iport_attr_indx,
+	&target_stat_iport_attr_port_auth_indx,
+	&target_stat_iport_attr_port_ident,
+	NULL,
+};
+
+static struct config_item_type target_stat_scsi_att_intr_port_cit = {
+	.ct_attrs		= target_stat_scsi_att_intr_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to setup
+ * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
+{
+	struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
+
+	config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
+			"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
+	config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
+			"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
+
+	ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
+	ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
+	ml_stat_grp->default_groups[2] = NULL;
+}
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
new file mode 100644
index 0000000..c9be953
--- /dev/null
+++ b/drivers/target/target_core_tmr.c
@@ -0,0 +1,458 @@
+/*******************************************************************************
+ * Filename:  target_core_tmr.c
+ *
+ * This file contains SPC-3 task management infrastructure
+ *
+ * (c) Copyright 2009-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/export.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
+int core_tmr_alloc_req(
+	struct se_cmd *se_cmd,
+	void *fabric_tmr_ptr,
+	u8 function,
+	gfp_t gfp_flags)
+{
+	struct se_tmr_req *tmr;
+
+	tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
+	if (!tmr) {
+		pr_err("Unable to allocate struct se_tmr_req\n");
+		return -ENOMEM;
+	}
+
+	se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
+	se_cmd->se_tmr_req = tmr;
+	tmr->task_cmd = se_cmd;
+	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
+	tmr->function = function;
+	INIT_LIST_HEAD(&tmr->tmr_list);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tmr_alloc_req);
+
+void core_tmr_release_req(struct se_tmr_req *tmr)
+{
+	struct se_device *dev = tmr->tmr_dev;
+	unsigned long flags;
+
+	if (dev) {
+		spin_lock_irqsave(&dev->se_tmr_lock, flags);
+		list_del_init(&tmr->tmr_list);
+		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+	}
+
+	kfree(tmr);
+}
+
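+/*
+ * Complete an aborted command, honouring the TASK ABORTED status (TAS)
+ * decision recorded in cmd->transport_state: when CMD_T_TAS is set, the
+ * command is returned to the initiator with TASK ABORTED status instead
+ * of being removed silently.
+ */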
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+{
+	unsigned long flags;
+	bool remove = true, send_tas;
+	/*
+	 * TASK ABORTED status (TAS) bit support
+	 */
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	send_tas = (cmd->transport_state & CMD_T_TAS);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	if (send_tas) {
+		remove = false;
+		transport_send_task_abort(cmd);
+	}
+
+	return transport_cmd_finish_abort(cmd, remove);
+}
+
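+/*
+ * For PREEMPT_AND_ABORT, return 0 when no @list was given or when @cmd's
+ * reservation key appears on @list (so the command is aborted), and 1 when
+ * the command should be skipped.
+ */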
+static int target_check_cdb_and_preempt(struct list_head *list,
+		struct se_cmd *cmd)
+{
+	struct t10_pr_registration *reg;
+
+	if (!list)
+		return 0;
+	list_for_each_entry(reg, list, pr_reg_abort_list) {
+		if (reg->pr_res_key == cmd->pr_res_key)
+			return 0;
+	}
+
+	return 1;
+}
+
+static bool __target_check_io_state(struct se_cmd *se_cmd,
+				    struct se_session *tmr_sess, int tas)
+{
+	struct se_session *sess = se_cmd->se_sess;
+
+	assert_spin_locked(&sess->sess_cmd_lock);
+	WARN_ON_ONCE(!irqs_disabled());
+	/*
+	 * If command already reached CMD_T_COMPLETE state within
+	 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
+	 * this se_cmd has been passed to fabric driver and will
+	 * not be aborted.
+	 *
+	 * Otherwise, obtain a local se_cmd->cmd_kref reference now for
+	 * TMR ABORT_TASK + LUN_RESET CMD_T_ABORTED processing, provided
+	 * se_cmd->cmd_kref has not already dropped to zero.
+	 */
+	spin_lock(&se_cmd->t_state_lock);
+	if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
+		pr_debug("Attempted to abort io tag: %llu already complete or"
+			" fabric stop, skipping\n", se_cmd->tag);
+		spin_unlock(&se_cmd->t_state_lock);
+		return false;
+	}
+	if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+		pr_debug("Attempted to abort io tag: %llu already shutdown,"
+			" skipping\n", se_cmd->tag);
+		spin_unlock(&se_cmd->t_state_lock);
+		return false;
+	}
+	se_cmd->transport_state |= CMD_T_ABORTED;
+
+	if ((tmr_sess != se_cmd->se_sess) && tas)
+		se_cmd->transport_state |= CMD_T_TAS;
+
+	spin_unlock(&se_cmd->t_state_lock);
+
+	return kref_get_unless_zero(&se_cmd->cmd_kref);
+}
+
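+/*
+ * Handle TMR ABORT_TASK: locate the command matching tmr->ref_task_tag on
+ * the issuing session, mark it CMD_T_ABORTED, wait for it to quiesce, and
+ * report TMR_FUNCTION_COMPLETE; otherwise report TMR_TASK_DOES_NOT_EXIST.
+ */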
+void core_tmr_abort_task(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct se_session *se_sess)
+{
+	struct se_cmd *se_cmd;
+	unsigned long flags;
+	u64 ref_tag;
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+
+		if (dev != se_cmd->se_dev)
+			continue;
+
+		/* skip task management functions, including tmr->task_cmd */
+		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+			continue;
+
+		ref_tag = se_cmd->tag;
+		if (tmr->ref_task_tag != ref_tag)
+			continue;
+
+		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
+			se_cmd->se_tfo->get_fabric_name(), ref_tag);
+
+		if (!__target_check_io_state(se_cmd, se_sess, 0)) {
+			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+			goto out;
+		}
+		list_del_init(&se_cmd->se_cmd_list);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+		cancel_work_sync(&se_cmd->work);
+		transport_wait_for_tasks(se_cmd);
+
+		if (!transport_cmd_finish_abort(se_cmd, true))
+			target_put_sess_cmd(se_cmd);
+
+		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+				" ref_tag: %llu\n", ref_tag);
+		tmr->response = TMR_FUNCTION_COMPLETE;
+		return;
+	}
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+out:
+	printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %llu\n",
+			tmr->ref_task_tag);
+	tmr->response = TMR_TASK_DOES_NOT_EXIST;
+}
+
+static void core_tmr_drain_tmr_list(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_tmr_list);
+	struct se_session *sess;
+	struct se_tmr_req *tmr_p, *tmr_pp;
+	struct se_cmd *cmd;
+	unsigned long flags;
+	bool rc;
+	/*
+	 * Release all pending and outgoing TMRs aside from the received
+	 * LUN_RESET TMR.
+	 */
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
+	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+		/*
+		 * Allow the received TMR to return with FUNCTION_COMPLETE.
+		 */
+		if (tmr_p == tmr)
+			continue;
+
+		cmd = tmr_p->task_cmd;
+		if (!cmd) {
+			pr_err("Unable to locate struct se_cmd for TMR\n");
+			continue;
+		}
+		/*
+		 * If this function was called with a valid pr_res_key
+		 * parameter (e.g. for a PROUT PREEMPT_AND_ABORT service
+		 * action), skip TMRs whose registration key does not match.
+		 */
+		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+			continue;
+
+		sess = cmd->se_sess;
+		if (WARN_ON_ONCE(!sess))
+			continue;
+
+		spin_lock(&sess->sess_cmd_lock);
+		spin_lock(&cmd->t_state_lock);
+		if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+		     (cmd->transport_state & CMD_T_FABRIC_STOP)) {
+			spin_unlock(&cmd->t_state_lock);
+			spin_unlock(&sess->sess_cmd_lock);
+			continue;
+		}
+		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+			spin_unlock(&cmd->t_state_lock);
+			spin_unlock(&sess->sess_cmd_lock);
+			continue;
+		}
+		if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+			spin_unlock(&cmd->t_state_lock);
+			spin_unlock(&sess->sess_cmd_lock);
+			continue;
+		}
+		cmd->transport_state |= CMD_T_ABORTED;
+		spin_unlock(&cmd->t_state_lock);
+
+		rc = kref_get_unless_zero(&cmd->cmd_kref);
+		if (!rc) {
+			printk("LUN_RESET TMR: kref_get_unless_zero failed for cmd\n");
+			spin_unlock(&sess->sess_cmd_lock);
+			continue;
+		}
+		spin_unlock(&sess->sess_cmd_lock);
+
+		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+	}
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+
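+	/*
+	 * Drain the local list with all locks dropped, since
+	 * cancel_work_sync() and transport_wait_for_tasks() may sleep.
+	 */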
+	list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
+		list_del_init(&tmr_p->tmr_list);
+		cmd = tmr_p->task_cmd;
+
+		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+			" Response: 0x%02x, t_state: %d\n",
+			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+			tmr_p->function, tmr_p->response, cmd->t_state);
+
+		cancel_work_sync(&cmd->work);
+		transport_wait_for_tasks(cmd);
+
+		if (!transport_cmd_finish_abort(cmd, 1))
+			target_put_sess_cmd(cmd);
+	}
+}
+
+static void core_tmr_drain_state_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_session *tmr_sess,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_task_list);
+	struct se_session *sess;
+	struct se_cmd *cmd, *next;
+	unsigned long flags;
+	int rc;
+
+	/*
+	 * Complete outstanding commands with TASK_ABORTED SAM status.
+	 *
+	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
+	 * for TMR LUN_RESET:
+	 *
+	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
+	 * other than the one that caused the SCSI device condition is
+	 * completed with TASK ABORTED status, if the TAS bit is set to one in
+	 * the Control mode page (see SPC-4). "No" indicates that no status is
+	 * returned for aborted commands.
+	 *
+	 * d) If the logical unit reset is caused by a particular I_T nexus
+	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
+	 * (TASK_ABORTED status) applies.
+	 *
+	 * Otherwise (e.g., if triggered by a hard reset), "no"
+	 * (no TASK_ABORTED SAM status) applies.
+	 *
+	 * Note that this seems to be independent of TAS (Task Aborted Status)
+	 * in the Control Mode Page.
+	 */
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+			continue;
+
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		sess = cmd->se_sess;
+		if (WARN_ON_ONCE(!sess))
+			continue;
+
+		spin_lock(&sess->sess_cmd_lock);
+		rc = __target_check_io_state(cmd, tmr_sess, tas);
+		spin_unlock(&sess->sess_cmd_lock);
+		if (!rc)
+			continue;
+
+		list_move_tail(&cmd->state_list, &drain_task_list);
+		cmd->state_active = false;
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+	while (!list_empty(&drain_task_list)) {
+		cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
+		list_del_init(&cmd->state_list);
+
+		pr_debug("LUN_RESET: %s cmd: %p"
+			" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d,"
+			" cdb: 0x%02x\n",
+			(preempt_and_abort_list) ? "Preempt" : "", cmd,
+			cmd->tag, 0,
+			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
+			cmd->t_task_cdb[0]);
+		pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
+			" -- CMD_T_ACTIVE: %d"
+			" CMD_T_STOP: %d CMD_T_SENT: %d\n",
+			cmd->tag, cmd->pr_res_key,
+			(cmd->transport_state & CMD_T_ACTIVE) != 0,
+			(cmd->transport_state & CMD_T_STOP) != 0,
+			(cmd->transport_state & CMD_T_SENT) != 0);
+
+		/*
+		 * If the command may be queued onto a workqueue cancel it now.
+		 *
+		 * This is equivalent to removal from the execute queue in the
+		 * loop above, but we do it down here given that
+		 * cancel_work_sync may block.
+		 */
+		cancel_work_sync(&cmd->work);
+		transport_wait_for_tasks(cmd);
+
+		if (!core_tmr_handle_tas_abort(cmd, tas))
+			target_put_sess_cmd(cmd);
+	}
+}
+
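+/*
+ * Handle TMR LUN_RESET: abort all outstanding TMRs and commands on @dev,
+ * honouring the TAS setting, and clear any legacy SPC-2 reservation.
+ */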
+int core_tmr_lun_reset(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *prout_cmd)
+{
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	struct se_session *tmr_sess = NULL;
+	int tas;
+	/*
+	 * The TASK_ABORTED status (TAS) bit is configurable via ConfigFS
+	 * struct se_device attributes; see spc4r17 section 7.4.6 Control mode page:
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	tas = dev->dev_attrib.emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough.
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_sess = tmr->task_cmd->se_sess;
+		tmr_nacl = tmr_sess->se_node_acl;
+		tmr_tpg = tmr_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			pr_debug("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				tmr_tpg->se_tpg_tfo->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		dev->transport->name, tas);
+
+	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
+				preempt_and_abort_list);
+
+	/*
+	 * Clear any legacy SPC-2 reservation when called during
+	 * LOGICAL UNIT RESET
+	 */
+	if (!preempt_and_abort_list &&
+	     (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
+		spin_lock(&dev->dev_reservation_lock);
+		dev->dev_reserved_node_acl = NULL;
+		dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+		spin_unlock(&dev->dev_reservation_lock);
+		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
+	}
+
+	atomic_long_inc(&dev->num_resets);
+
+	pr_debug("LUN_RESET: %s for [%s] Complete\n",
+			(preempt_and_abort_list) ? "Preempt" : "TMR",
+			dev->transport->name);
+	return 0;
+}
+
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
new file mode 100644
index 0000000..ee16a45
--- /dev/null
+++ b/drivers/target/target_core_tpg.c
@@ -0,0 +1,704 @@
+/*******************************************************************************
+ * Filename:  target_core_tpg.c
+ *
+ * This file contains generic Target Portal Group related functions.
+ *
+ * (c) Copyright 2002-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/export.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_proto.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+extern struct se_device *g_lun0_dev;
+
+static DEFINE_SPINLOCK(tpg_lock);
+static LIST_HEAD(tpg_list);
+
+/*	__core_tpg_get_initiator_node_acl():
+ *
+ *	The caller must hold tpg->acl_node_mutex.
+ */
+struct se_node_acl *__core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	const char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!strcmp(acl->initiatorname, initiatorname))
+			return acl;
+	}
+
+	return NULL;
+}
+
+/*	core_tpg_get_initiator_node_acl():
+ *
+ *	Look up an initiator node ACL by name, taking an acl_kref reference
+ *	that is held until session shutdown.
+ */
+struct se_node_acl *core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+	/*
+	 * Obtain se_node_acl->acl_kref using fabric driver provided
+	 * initiatorname[] during node acl endpoint lookup driven by
+	 * new se_session login.
+	 *
+	 * The reference is held until se_session shutdown -> release
+	 * occurs via fabric driver invoked transport_deregister_session()
+	 * or transport_free_session() code.
+	 */
+	mutex_lock(&tpg->acl_node_mutex);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (acl) {
+		if (!kref_get_unless_zero(&acl->acl_kref))
+			acl = NULL;
+	}
+	mutex_unlock(&tpg->acl_node_mutex);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
+
+void core_allocate_nexus_loss_ua(
+	struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+
+	if (!nacl)
+		return;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+		core_scsi3_ua_allocate(deve, 0x29,
+			ASCQ_29H_NEXUS_LOSS_OCCURRED);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
+
+/*	core_tpg_add_node_to_devs():
+ *
+ *	Export active TPG LUNs (or only @lun_orig when non-NULL) to a
+ *	demo-mode generated node ACL.
+ */
+void core_tpg_add_node_to_devs(
+	struct se_node_acl *acl,
+	struct se_portal_group *tpg,
+	struct se_lun *lun_orig)
+{
+	u32 lun_access = 0;
+	struct se_lun *lun;
+	struct se_device *dev;
+
+	mutex_lock(&tpg->tpg_lun_mutex);
+	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
+		if (lun_orig && lun != lun_orig)
+			continue;
+
+		dev = rcu_dereference_check(lun->lun_se_dev,
+					    lockdep_is_held(&tpg->tpg_lun_mutex));
+		/*
+		 * By default in LIO-Target $FABRIC_MOD,
+		 * demo_mode_write_protect is ON, or READ_ONLY;
+		 */
+		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
+			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			/*
+			 * Allow only optical drives to issue R/W in default RO
+			 * demo mode.
+			 */
+			if (dev->transport->get_device_type(dev) == TYPE_DISK)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		}
+
+		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
+			" access for LUN in Demo Mode\n",
+			tpg->se_tpg_tfo->get_fabric_name(),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
+			"READ-WRITE" : "READ-ONLY");
+
+		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
+						 lun_access, acl, tpg);
+		/*
+		 * Check to see if there are any existing persistent reservation
+		 * APTPL pre-registrations that need to be enabled for this dynamic
+		 * LUN ACL now..
+		 */
+		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
+						    lun->unpacked_lun);
+	}
+	mutex_unlock(&tpg->tpg_lun_mutex);
+}
+
+static void
+target_set_nacl_queue_depth(struct se_portal_group *tpg,
+			    struct se_node_acl *acl, u32 queue_depth)
+{
+	acl->queue_depth = queue_depth;
+
+	if (!acl->queue_depth) {
+		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
+			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
+			acl->initiatorname);
+		acl->queue_depth = 1;
+	}
+}
+
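+/*
+ * Allocate and initialize a new se_node_acl, sized to include any extra
+ * fabric-private area requested via tfo->node_acl_size.
+ */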
+static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
+		const unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+	u32 queue_depth;
+
+	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
+			GFP_KERNEL);
+	if (!acl)
+		return NULL;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
+	kref_init(&acl->acl_kref);
+	init_completion(&acl->acl_free_comp);
+	spin_lock_init(&acl->nacl_sess_lock);
+	mutex_init(&acl->lun_entry_mutex);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+
+	if (tpg->se_tpg_tfo->tpg_get_default_depth)
+		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+	else
+		queue_depth = 1;
+	target_set_nacl_queue_depth(tpg, acl, queue_depth);
+
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+
+	tpg->se_tpg_tfo->set_default_node_attributes(acl);
+
+	return acl;
+}
+
+static void target_add_node_acl(struct se_node_acl *acl)
+{
+	struct se_portal_group *tpg = acl->se_tpg;
+
+	mutex_lock(&tpg->acl_node_mutex);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	mutex_unlock(&tpg->acl_node_mutex);
+
+	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n",
+		tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg),
+		acl->dynamic_node_acl ? "DYNAMIC" : "",
+		acl->queue_depth,
+		tpg->se_tpg_tfo->get_fabric_name(),
+		acl->initiatorname);
+}
+
+bool target_tpg_has_node_acl(struct se_portal_group *tpg,
+			     const char *initiatorname)
+{
+	struct se_node_acl *acl;
+	bool found = false;
+
+	mutex_lock(&tpg->acl_node_mutex);
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!strcmp(acl->initiatorname, initiatorname)) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&tpg->acl_node_mutex);
+
+	return found;
+}
+EXPORT_SYMBOL(target_tpg_has_node_acl);
+
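+/*
+ * Called at session login: return an existing (explicit) node ACL, or
+ * generate a dynamic one when the fabric allows demo mode.
+ */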
+struct se_node_acl *core_tpg_check_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (acl)
+		return acl;
+
+	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
+		return NULL;
+
+	acl = target_alloc_node_acl(tpg, initiatorname);
+	if (!acl)
+		return NULL;
+	/*
+	 * When allocating a dynamically generated node_acl, go ahead
+	 * and take the extra kref now before returning to the fabric
+	 * driver caller.
+	 *
+	 * Note this reference will be released at session shutdown
+	 * time within transport_free_session() code.
+	 */
+	kref_get(&acl->acl_kref);
+	acl->dynamic_node_acl = 1;
+
+	/*
+	 * Here we only create demo-mode MappedLUNs from the active
+	 * TPG LUNs if the fabric is not explicitly asking for
+	 * tpg_check_demo_mode_login_only() == 1.
+	 */
+	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
+	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
+		core_tpg_add_node_to_devs(acl, tpg, NULL);
+
+	target_add_node_acl(acl);
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
+
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
+{
+	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
+		cpu_relax();
+}
+
+struct se_node_acl *core_tpg_add_initiator_node_acl(
+	struct se_portal_group *tpg,
+	const char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	mutex_lock(&tpg->acl_node_mutex);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (acl) {
+		if (acl->dynamic_node_acl) {
+			acl->dynamic_node_acl = 0;
+			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
+				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
+			mutex_unlock(&tpg->acl_node_mutex);
+			return acl;
+		}
+
+		pr_err("ACL entry for %s Initiator"
+			" Node %s already exists for TPG %u, ignoring"
+			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
+			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		mutex_unlock(&tpg->acl_node_mutex);
+		return ERR_PTR(-EEXIST);
+	}
+	mutex_unlock(&tpg->acl_node_mutex);
+
+	acl = target_alloc_node_acl(tpg, initiatorname);
+	if (!acl)
+		return ERR_PTR(-ENOMEM);
+
+	target_add_node_acl(acl);
+	return acl;
+}
+
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
+{
+	struct se_portal_group *tpg = acl->se_tpg;
+	LIST_HEAD(sess_list);
+	struct se_session *sess, *sess_tmp;
+	unsigned long flags;
+	int rc;
+
+	mutex_lock(&tpg->acl_node_mutex);
+	if (acl->dynamic_node_acl)
+		acl->dynamic_node_acl = 0;
+	list_del_init(&acl->acl_list);
+	tpg->num_node_acls--;
+	mutex_unlock(&tpg->acl_node_mutex);
+
+	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+	acl->acl_stop = 1;
+
+	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
+				sess_acl_list) {
+		if (sess->sess_tearing_down != 0)
+			continue;
+
+		if (!target_get_session(sess))
+			continue;
+		list_move(&sess->sess_acl_list, &sess_list);
+	}
+	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
+		list_del(&sess->sess_acl_list);
+
+		rc = tpg->se_tpg_tfo->shutdown_session(sess);
+		target_put_session(sess);
+		if (!rc)
+			continue;
+		target_put_session(sess);
+	}
+	target_put_nacl(acl);
+	/*
+	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
+	 * for active fabric session transport_deregister_session() callbacks.
+	 */
+	wait_for_completion(&acl->acl_free_comp);
+
+	core_tpg_wait_for_nacl_pr_ref(acl);
+	core_free_device_list_for_node(acl, tpg);
+
+	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
+
+	kfree(acl);
+}
+
+/*	core_tpg_set_initiator_node_queue_depth():
+ *
+ *	Change the queue depth for an initiator node ACL, forcing session
+ *	reinstatement for any active sessions so the new depth takes effect.
+ */
+int core_tpg_set_initiator_node_queue_depth(
+	struct se_node_acl *acl,
+	u32 queue_depth)
+{
+	LIST_HEAD(sess_list);
+	struct se_portal_group *tpg = acl->se_tpg;
+	struct se_session *sess, *sess_tmp;
+	unsigned long flags;
+	int rc;
+
+	/*
+	 * User has requested to change the queue depth for an Initiator Node.
+	 * Change the value in the Node's struct se_node_acl, and call
+	 * target_set_nacl_queue_depth() to set the new queue depth.
+	 */
+	target_set_nacl_queue_depth(tpg, acl, queue_depth);
+
+	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
+				 sess_acl_list) {
+		if (sess->sess_tearing_down != 0)
+			continue;
+		if (!target_get_session(sess))
+			continue;
+		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+		/*
+		 * Finally call tpg->se_tpg_tfo->shutdown_session() to force
+		 * session reinstatement to occur if there is an active session
+		 * for the $FABRIC_MOD Initiator Node in question.
+		 */
+		rc = tpg->se_tpg_tfo->shutdown_session(sess);
+		target_put_session(sess);
+		if (!rc) {
+			spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+			continue;
+		}
+		target_put_session(sess);
+		spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+	}
+	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+	pr_debug("Successfully changed queue depth to: %d for Initiator"
+		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
+		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg));
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+
+/*	core_tpg_set_initiator_node_tag():
+ *
+ *	Initiator nodeacl tags are not used internally, but may be used by
+ *	userspace to emulate aliases or groups.
+ *	Returns length of newly-set tag or -EINVAL.
+ */
+int core_tpg_set_initiator_node_tag(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl,
+	const char *new_tag)
+{
+	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
+		return -EINVAL;
+
+	if (!strncmp("NULL", new_tag, 4)) {
+		acl->acl_tag[0] = '\0';
+		return 0;
+	}
+
+	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+
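+/*
+ * percpu_ref release callback for se_lun->lun_ref, invoked once the last
+ * I/O reference is dropped during LUN shutdown.
+ */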
+static void core_tpg_lun_ref_release(struct percpu_ref *ref)
+{
+	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+	complete(&lun->lun_shutdown_comp);
+}
+
+int core_tpg_register(
+	struct se_wwn *se_wwn,
+	struct se_portal_group *se_tpg,
+	int proto_id)
+{
+	int ret;
+
+	if (!se_tpg)
+		return -EINVAL;
+	/*
+	 * For the typical case where core_tpg_register() is called by a
+	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
+	 * configfs context, use the original tf_ops pointer already saved
+	 * by target-core in target_fabric_make_wwn().
+	 *
+	 * Otherwise, for special cases like iscsi-target discovery TPGs
+	 * the caller is responsible for setting ->se_tpg_tfo ahead of
+	 * calling core_tpg_register().
+	 */
+	if (se_wwn)
+		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;
+
+	if (!se_tpg->se_tpg_tfo) {
+		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
+		return -EINVAL;
+	}
+
+	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
+	se_tpg->proto_id = proto_id;
+	se_tpg->se_tpg_wwn = se_wwn;
+	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
+	INIT_LIST_HEAD(&se_tpg->acl_node_list);
+	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
+	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
+	spin_lock_init(&se_tpg->session_lock);
+	mutex_init(&se_tpg->tpg_lun_mutex);
+	mutex_init(&se_tpg->acl_node_mutex);
+
+	if (se_tpg->proto_id >= 0) {
+		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
+		if (IS_ERR(se_tpg->tpg_virt_lun0))
+			return PTR_ERR(se_tpg->tpg_virt_lun0);
+
+		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
+				TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+		if (ret < 0) {
+			kfree(se_tpg->tpg_virt_lun0);
+			return ret;
+		}
+	}
+
+	spin_lock_bh(&tpg_lock);
+	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
+	spin_unlock_bh(&tpg_lock);
+
+	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
+		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
+		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
+		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_register);
+
+int core_tpg_deregister(struct se_portal_group *se_tpg)
+{
+	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
+	struct se_node_acl *nacl, *nacl_tmp;
+	LIST_HEAD(node_list);
+
+	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
+		 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
+		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
+		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
+
+	spin_lock_bh(&tpg_lock);
+	list_del(&se_tpg->se_tpg_node);
+	spin_unlock_bh(&tpg_lock);
+
+	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
+		cpu_relax();
+
+	mutex_lock(&se_tpg->acl_node_mutex);
+	list_splice_init(&se_tpg->acl_node_list, &node_list);
+	mutex_unlock(&se_tpg->acl_node_mutex);
+	/*
+	 * Release any remaining demo-mode generated se_node_acl that have
+	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+	 * in transport_deregister_session().
+	 */
+	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
+		list_del_init(&nacl->acl_list);
+		se_tpg->num_node_acls--;
+
+		core_tpg_wait_for_nacl_pr_ref(nacl);
+		core_free_device_list_for_node(nacl, se_tpg);
+		kfree(nacl);
+	}
+
+	if (se_tpg->proto_id >= 0) {
+		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
+		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_deregister);
+
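+/*
+ * Allocate and initialize an se_lun for @unpacked_lun; the LUN is not
+ * visible until core_tpg_add_lun() links it into the TPG.
+ */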
+struct se_lun *core_tpg_alloc_lun(
+	struct se_portal_group *tpg,
+	u64 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
+	if (!lun) {
+		pr_err("Unable to allocate se_lun memory\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	lun->unpacked_lun = unpacked_lun;
+	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+	atomic_set(&lun->lun_acl_count, 0);
+	init_completion(&lun->lun_ref_comp);
+	init_completion(&lun->lun_shutdown_comp);
+	INIT_LIST_HEAD(&lun->lun_deve_list);
+	INIT_LIST_HEAD(&lun->lun_dev_link);
+	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+	spin_lock_init(&lun->lun_deve_lock);
+	mutex_init(&lun->lun_tg_pt_md_mutex);
+	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
+	spin_lock_init(&lun->lun_tg_pt_gp_lock);
+	lun->lun_tpg = tpg;
+
+	return lun;
+}
+
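+/*
+ * Export @dev through @lun: initialize the percpu I/O reference, allocate a
+ * relative target port index, and publish the LUN into the TPG hlist.
+ */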
+int core_tpg_add_lun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 lun_access,
+	struct se_device *dev)
+{
+	int ret;
+
+	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
+			      GFP_KERNEL);
+	if (ret < 0)
+		goto out;
+
+	ret = core_alloc_rtpi(lun, dev);
+	if (ret)
+		goto out_kill_ref;
+
+	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
+
+	mutex_lock(&tpg->tpg_lun_mutex);
+
+	spin_lock(&dev->se_port_lock);
+	lun->lun_index = dev->dev_index;
+	rcu_assign_pointer(lun->lun_se_dev, dev);
+	dev->export_count++;
+	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
+	spin_unlock(&dev->se_port_lock);
+
+	if (dev->dev_flags & DF_READ_ONLY)
+		lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	else
+		lun->lun_access = lun_access;
+	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
+	mutex_unlock(&tpg->tpg_lun_mutex);
+
+	return 0;
+
+out_kill_ref:
+	percpu_ref_exit(&lun->lun_ref);
+out:
+	return ret;
+}
+
+void core_tpg_remove_lun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	/*
+	 * rcu_dereference_raw protected by se_lun->lun_group symlink
+	 * reference to se_device->dev_group.
+	 */
+	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
+	lun->lun_shutdown = true;
+
+	core_clear_lun_from_tpg(lun, tpg);
+	/*
+	 * Wait for any active I/O references to percpu se_lun->lun_ref to
+	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
+	 * logic when referencing a remote target port during ALL_TGT_PT=1
+	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
+	 */
+	transport_clear_lun_ref(lun);
+
+	mutex_lock(&tpg->tpg_lun_mutex);
+	if (lun->lun_se_dev) {
+		target_detach_tg_pt_gp(lun);
+
+		spin_lock(&dev->se_port_lock);
+		list_del(&lun->lun_dev_link);
+		dev->export_count--;
+		rcu_assign_pointer(lun->lun_se_dev, NULL);
+		spin_unlock(&dev->se_port_lock);
+	}
+	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+		hlist_del_rcu(&lun->link);
+
+	lun->lun_shutdown = false;
+	mutex_unlock(&tpg->tpg_lun_mutex);
+
+	percpu_ref_exit(&lun->lun_ref);
+}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
new file mode 100644
index 0000000..a42054e
--- /dev/null
+++ b/drivers/target/target_core_transport.c
@@ -0,0 +1,3192 @@
+/*******************************************************************************
+ * Filename:  target_core_transport.c
+ *
+ * This file contains the Generic Target Engine Core.
+ *
+ * (c) Copyright 2002-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/cdrom.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/target.h>
+
+static struct workqueue_struct *target_completion_wq;
+static struct kmem_cache *se_sess_cache;
+struct kmem_cache *se_ua_cache;
+struct kmem_cache *t10_pr_reg_cache;
+struct kmem_cache *t10_alua_lu_gp_cache;
+struct kmem_cache *t10_alua_lu_gp_mem_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
+
+static void transport_complete_task_attr(struct se_cmd *cmd);
+static void transport_handle_queue_full(struct se_cmd *cmd,
+		struct se_device *dev);
+static int transport_put_cmd(struct se_cmd *cmd);
+static void target_complete_ok_work(struct work_struct *work);
+
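+/*
+ * Create the kmem caches and completion workqueue shared by target core;
+ * on any failure, tear down everything allocated so far and return -ENOMEM.
+ */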
+int init_se_kmem_caches(void)
+{
+	se_sess_cache = kmem_cache_create("se_sess_cache",
+			sizeof(struct se_session), __alignof__(struct se_session),
+			0, NULL);
+	if (!se_sess_cache) {
+		pr_err("kmem_cache_create() for struct se_session"
+				" failed\n");
+		goto out;
+	}
+	se_ua_cache = kmem_cache_create("se_ua_cache",
+			sizeof(struct se_ua), __alignof__(struct se_ua),
+			0, NULL);
+	if (!se_ua_cache) {
+		pr_err("kmem_cache_create() for struct se_ua failed\n");
+		goto out_free_sess_cache;
+	}
+	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
+			sizeof(struct t10_pr_registration),
+			__alignof__(struct t10_pr_registration), 0, NULL);
+	if (!t10_pr_reg_cache) {
+		pr_err("kmem_cache_create() for struct t10_pr_registration"
+				" failed\n");
+		goto out_free_ua_cache;
+	}
+	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
+			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
+			0, NULL);
+	if (!t10_alua_lu_gp_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
+				" failed\n");
+		goto out_free_pr_reg_cache;
+	}
+	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
+			sizeof(struct t10_alua_lu_gp_member),
+			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
+	if (!t10_alua_lu_gp_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
+				"cache failed\n");
+		goto out_free_lu_gp_cache;
+	}
+	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
+			sizeof(struct t10_alua_tg_pt_gp),
+			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
+	if (!t10_alua_tg_pt_gp_cache) {
+		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"cache failed\n");
+		goto out_free_lu_gp_mem_cache;
+	}
+	t10_alua_lba_map_cache = kmem_cache_create(
+			"t10_alua_lba_map_cache",
+			sizeof(struct t10_alua_lba_map),
+			__alignof__(struct t10_alua_lba_map), 0, NULL);
+	if (!t10_alua_lba_map_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_"
+				"cache failed\n");
+		goto out_free_tg_pt_gp_cache;
+	}
+	t10_alua_lba_map_mem_cache = kmem_cache_create(
+			"t10_alua_lba_map_mem_cache",
+			sizeof(struct t10_alua_lba_map_member),
+			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
+	if (!t10_alua_lba_map_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+				"cache failed\n");
+		goto out_free_lba_map_cache;
+	}
+
+	target_completion_wq = alloc_workqueue("target_completion",
+					       WQ_MEM_RECLAIM, 0);
+	if (!target_completion_wq)
+		goto out_free_lba_map_mem_cache;
+
+	return 0;
+
+out_free_lba_map_mem_cache:
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+	kmem_cache_destroy(t10_alua_lba_map_cache);
+out_free_tg_pt_gp_cache:
+	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+out_free_lu_gp_mem_cache:
+	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+out_free_lu_gp_cache:
+	kmem_cache_destroy(t10_alua_lu_gp_cache);
+out_free_pr_reg_cache:
+	kmem_cache_destroy(t10_pr_reg_cache);
+out_free_ua_cache:
+	kmem_cache_destroy(se_ua_cache);
+out_free_sess_cache:
+	kmem_cache_destroy(se_sess_cache);
+out:
+	return -ENOMEM;
+}
+
+void release_se_kmem_caches(void)
+{
+	destroy_workqueue(target_completion_wq);
+	kmem_cache_destroy(se_sess_cache);
+	kmem_cache_destroy(se_ua_cache);
+	kmem_cache_destroy(t10_pr_reg_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+	kmem_cache_destroy(t10_alua_lba_map_cache);
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+}
+
+/* This code ensures unique mib indexes are handed out. */
+static DEFINE_SPINLOCK(scsi_mib_index_lock);
+static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+	u32 new_index;
+
+	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
+
+	spin_lock(&scsi_mib_index_lock);
+	new_index = ++scsi_mib_index[type];
+	spin_unlock(&scsi_mib_index_lock);
+
+	return new_index;
+}
+
+void transport_subsystem_check_init(void)
+{
+	int ret;
+	static int sub_api_initialized;
+
+	if (sub_api_initialized)
+		return;
+
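+	/*
+	 * Backend modules are optional here: a request_module() failure is
+	 * logged but does not prevent target core initialization.
+	 */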
+	ret = request_module("target_core_iblock");
+	if (ret != 0)
+		pr_err("Unable to load target_core_iblock\n");
+
+	ret = request_module("target_core_file");
+	if (ret != 0)
+		pr_err("Unable to load target_core_file\n");
+
+	ret = request_module("target_core_pscsi");
+	if (ret != 0)
+		pr_err("Unable to load target_core_pscsi\n");
+
+	ret = request_module("target_core_user");
+	if (ret != 0)
+		pr_err("Unable to load target_core_user\n");
+
+	sub_api_initialized = 1;
+}
+
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
+{
+	struct se_session *se_sess;
+
+	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+	if (!se_sess) {
+		pr_err("Unable to allocate struct se_session from"
+				" se_sess_cache\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&se_sess->sess_list);
+	INIT_LIST_HEAD(&se_sess->sess_acl_list);
+	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+	INIT_LIST_HEAD(&se_sess->sess_wait_list);
+	spin_lock_init(&se_sess->sess_cmd_lock);
+	kref_init(&se_sess->sess_kref);
+	se_sess->sup_prot_ops = sup_prot_ops;
+
+	return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session);
+
+int transport_alloc_session_tags(struct se_session *se_sess,
+			         unsigned int tag_num, unsigned int tag_size)
+{
+	int rc;
+
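+	/*
+	 * Try a physically contiguous allocation first, then fall back to
+	 * vmalloc when the tag map is too large for kmalloc.
+	 */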
+	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
+					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	if (!se_sess->sess_cmd_map) {
+		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
+		if (!se_sess->sess_cmd_map) {
+			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+			return -ENOMEM;
+		}
+	}
+
+	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
+	if (rc < 0) {
+		pr_err("Unable to init se_sess->sess_tag_pool,"
+			" tag_num: %u\n", tag_num);
+		kvfree(se_sess->sess_cmd_map);
+		se_sess->sess_cmd_map = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_alloc_session_tags);
+
+struct se_session *transport_init_session_tags(unsigned int tag_num,
+					       unsigned int tag_size,
+					       enum target_prot_op sup_prot_ops)
+{
+	struct se_session *se_sess;
+	int rc;
+
+	se_sess = transport_init_session(sup_prot_ops);
+	if (IS_ERR(se_sess))
+		return se_sess;
+
+	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
+	if (rc < 0) {
+		transport_free_session(se_sess);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session_tags);
+
+/*
+ * Called with struct se_portal_group->session_lock held.
+ */
+void __transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
+	unsigned char buf[PR_REG_ISID_LEN];
+
+	se_sess->se_tpg = se_tpg;
+	se_sess->fabric_sess_ptr = fabric_sess_ptr;
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
+	 *
+	 * Only set for struct se_session's that will actually be moving I/O.
+	 * eg: *NOT* discovery sessions.
+	 */
+	if (se_nacl) {
+		/*
+		 * Determine if fabric allows for T10-PI feature bits exposed to
+		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
+		 *
+		 * If so, then always save prot_type on a per se_node_acl node
+		 * basis and reinstate the previous sess_prot_type to avoid
+		 * disabling PI below any LUNs previously registered from the
+		 * initiator side.
+		 */
+		if (se_nacl->saved_prot_type)
+			se_sess->sess_prot_type = se_nacl->saved_prot_type;
+		else if (tfo->tpg_check_prot_fabric_only)
+			se_sess->sess_prot_type = se_nacl->saved_prot_type =
+					tfo->tpg_check_prot_fabric_only(se_tpg);
+		/*
+		 * If the fabric module supports an ISID based TransportID,
+		 * save this value in binary from the fabric I_T Nexus now.
+		 */
+		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+			memset(&buf[0], 0, PR_REG_ISID_LEN);
+			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
+					&buf[0], PR_REG_ISID_LEN);
+			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+		}
+
+		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		/*
+		 * The se_nacl->nacl_sess pointer will be set to the
+		 * last active I_T Nexus for each struct se_node_acl.
+		 */
+		se_nacl->nacl_sess = se_sess;
+
+		list_add_tail(&se_sess->sess_acl_list,
+			      &se_nacl->acl_sess_list);
+		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	}
+	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
+
+	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
+}
+EXPORT_SYMBOL(__transport_register_session);
+
+void transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&se_tpg->session_lock, flags);
+	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
+	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+}
+EXPORT_SYMBOL(transport_register_session);
+
+static void target_release_session(struct kref *kref)
+{
+	struct se_session *se_sess = container_of(kref,
+			struct se_session, sess_kref);
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+
+	se_tpg->se_tpg_tfo->close_session(se_sess);
+}
+
+int target_get_session(struct se_session *se_sess)
+{
+	return kref_get_unless_zero(&se_sess->sess_kref);
+}
+EXPORT_SYMBOL(target_get_session);
+
+void target_put_session(struct se_session *se_sess)
+{
+	kref_put(&se_sess->sess_kref, target_release_session);
+}
+EXPORT_SYMBOL(target_put_session);
+
+ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
+{
+	struct se_session *se_sess;
+	ssize_t len = 0;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
+		if (!se_sess->se_node_acl)
+			continue;
+		if (!se_sess->se_node_acl->dynamic_node_acl)
+			continue;
+		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
+			break;
+
+		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
+				se_sess->se_node_acl->initiatorname);
+		len += 1; /* Include NULL terminator */
+	}
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	return len;
+}
+EXPORT_SYMBOL(target_show_dynamic_sessions);
+
+static void target_complete_nacl(struct kref *kref)
+{
+	struct se_node_acl *nacl = container_of(kref,
+				struct se_node_acl, acl_kref);
+	struct se_portal_group *se_tpg = nacl->se_tpg;
+
+	if (!nacl->dynamic_stop) {
+		complete(&nacl->acl_free_comp);
+		return;
+	}
+
+	mutex_lock(&se_tpg->acl_node_mutex);
+	list_del_init(&nacl->acl_list);
+	mutex_unlock(&se_tpg->acl_node_mutex);
+
+	core_tpg_wait_for_nacl_pr_ref(nacl);
+	core_free_device_list_for_node(nacl, se_tpg);
+	kfree(nacl);
+}
+
+void target_put_nacl(struct se_node_acl *nacl)
+{
+	kref_put(&nacl->acl_kref, target_complete_nacl);
+}
+EXPORT_SYMBOL(target_put_nacl);
+
+void transport_deregister_session_configfs(struct se_session *se_sess)
+{
+	struct se_node_acl *se_nacl;
+	unsigned long flags;
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
+	 */
+	se_nacl = se_sess->se_node_acl;
+	if (se_nacl) {
+		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+		if (se_nacl->acl_stop == 0)
+			list_del(&se_sess->sess_acl_list);
+		/*
+		 * If the session list is empty, then clear the pointer.
+		 * Otherwise, set the struct se_session pointer from the tail
+		 * element of the per struct se_node_acl active session list.
+		 */
+		if (list_empty(&se_nacl->acl_sess_list))
+			se_nacl->nacl_sess = NULL;
+		else {
+			se_nacl->nacl_sess = container_of(
+					se_nacl->acl_sess_list.prev,
+					struct se_session, sess_acl_list);
+		}
+		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+	}
+}
+EXPORT_SYMBOL(transport_deregister_session_configfs);
+
+void transport_free_session(struct se_session *se_sess)
+{
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
+	/*
+	 * Drop the se_node_acl->nacl_kref obtained from within
+	 * core_tpg_get_initiator_node_acl().
+	 */
+	if (se_nacl) {
+		struct se_portal_group *se_tpg = se_nacl->se_tpg;
+		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+		unsigned long flags;
+
+		se_sess->se_node_acl = NULL;
+
+		/*
+		 * Also determine if we need to drop the extra ->cmd_kref if
+		 * it had been previously dynamically generated, and
+		 * the endpoint is not caching dynamic ACLs.
+		 */
+		mutex_lock(&se_tpg->acl_node_mutex);
+		if (se_nacl->dynamic_node_acl &&
+		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+			if (list_empty(&se_nacl->acl_sess_list))
+				se_nacl->dynamic_stop = true;
+			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+			if (se_nacl->dynamic_stop)
+				list_del_init(&se_nacl->acl_list);
+		}
+		mutex_unlock(&se_tpg->acl_node_mutex);
+
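+		/*
+		 * For a dynamic ACL being shut down, also drop the initial
+		 * kref_init() reference so that the final put below reaches
+		 * target_complete_nacl() and frees the ACL.
+		 */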
+		if (se_nacl->dynamic_stop)
+			target_put_nacl(se_nacl);
+
+		target_put_nacl(se_nacl);
+	}
+	if (se_sess->sess_cmd_map) {
+		percpu_ida_destroy(&se_sess->sess_tag_pool);
+		kvfree(se_sess->sess_cmd_map);
+	}
+	kmem_cache_free(se_sess_cache, se_sess);
+}
+EXPORT_SYMBOL(transport_free_session);
+
+void transport_deregister_session(struct se_session *se_sess)
+{
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+	unsigned long flags;
+
+	if (!se_tpg) {
+		transport_free_session(se_sess);
+		return;
+	}
+
+	spin_lock_irqsave(&se_tpg->session_lock, flags);
+	list_del(&se_sess->sess_list);
+	se_sess->se_tpg = NULL;
+	se_sess->fabric_sess_ptr = NULL;
+	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+
+	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
+		se_tpg->se_tpg_tfo->get_fabric_name());
+	/*
+	 * If the last kref for an explicit NodeACL is dropped now, the
+	 * transport_free_session() call below wakes the ->acl_free_comp
+	 * waiter in the configfs se_node_acl->acl_group removal context.
+	 *
+	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
+	 * to release all remaining generate_node_acl=1 created ACL resources.
+	 */
+
+	transport_free_session(se_sess);
+}
+EXPORT_SYMBOL(transport_deregister_session);
+
+static void target_remove_from_state_list(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	if (cmd->transport_state & CMD_T_BUSY)
+		return;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	if (cmd->state_active) {
+		list_del(&cmd->state_list);
+		cmd->state_active = false;
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
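+/*
+ * Returns non-zero when the command has been handed back to the fabric,
+ * either because a CMD_T_STOP waiter now owns it or because
+ * ->check_stop_free() released it; zero when the caller still owns the
+ * descriptor.
+ */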
+static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+				    bool write_pending)
+{
+	unsigned long flags;
+
+	if (remove_from_lists) {
+		target_remove_from_state_list(cmd);
+
+		/*
+		 * Clear struct se_cmd->se_lun before the handoff to FE.
+		 */
+		cmd->se_lun = NULL;
+	}
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (write_pending)
+		cmd->t_state = TRANSPORT_WRITE_PENDING;
+
+	/*
+	 * Determine if frontend context caller is requesting the stopping of
+	 * this command for frontend exceptions.
+	 */
+	if (cmd->transport_state & CMD_T_STOP) {
+		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+			__func__, __LINE__, cmd->tag);
+
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		complete_all(&cmd->t_transport_stop_comp);
+		return 1;
+	}
+
+	cmd->transport_state &= ~CMD_T_ACTIVE;
+	if (remove_from_lists) {
+		/*
+		 * Some fabric modules like tcm_loop can release
+		 * their internally allocated I/O reference and the
+		 * struct se_cmd now.
+		 *
+		 * Fabric modules are expected to return '1' here if the
+		 * se_cmd being passed is released at this point,
+		 * or zero if not being released.
+		 */
+		if (cmd->se_tfo->check_stop_free != NULL) {
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+			return cmd->se_tfo->check_stop_free(cmd);
+		}
+	}
+
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	return 0;
+}
+
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+{
+	return transport_cmd_check_stop(cmd, true, false);
+}
+
+static void transport_lun_remove_cmd(struct se_cmd *cmd)
+{
+	struct se_lun *lun = cmd->se_lun;
+
+	if (!lun)
+		return;
+
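+	/* cmpxchg ensures the percpu lun_ref is dropped at most once. */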
+	if (cmpxchg(&cmd->lun_ref_active, true, false))
+		percpu_ref_put(&lun->lun_ref);
+}
+
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+{
+	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+	int ret = 0;
+
+	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+		transport_lun_remove_cmd(cmd);
+	/*
+	 * Allow the fabric driver to unmap any resources before
+	 * releasing the descriptor via TFO->release_cmd()
+	 */
+	if (remove)
+		cmd->se_tfo->aborted_task(cmd);
+
+	if (transport_cmd_check_stop_to_fabric(cmd))
+		return 1;
+	if (remove && ack_kref)
+		ret = transport_put_cmd(cmd);
+
+	return ret;
+}
+
+static void target_complete_failure_work(struct work_struct *work)
+{
+	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+	transport_generic_request_failure(cmd,
+			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+}
+
+/*
+ * Used when asking transport to copy Sense Data from the underlying
+ * Linux/SCSI struct scsi_cmnd
+ */
+static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	WARN_ON(!cmd->se_lun);
+
+	if (!dev)
+		return NULL;
+
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+		return NULL;
+
+	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+
+	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
+		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
+	return cmd->sense_buffer;
+}
+
+void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+{
+	struct se_device *dev = cmd->se_dev;
+	int success = scsi_status == GOOD;
+	unsigned long flags;
+
+	cmd->scsi_status = scsi_status;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->transport_state &= ~CMD_T_BUSY;
+
+	if (dev && dev->transport->transport_complete) {
+		dev->transport->transport_complete(cmd,
+				cmd->t_data_sg,
+				transport_get_sense_buffer(cmd));
+		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+			success = 1;
+	}
+
+	/*
+	 * See if we are waiting to complete for an exception condition.
+	 */
+	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		complete(&cmd->task_stop_comp);
+		return;
+	}
+
+	/*
+	 * Check for case where an explicit ABORT_TASK has been received
+	 * and transport_wait_for_tasks() will be waiting for completion..
+	 */
+	if (cmd->transport_state & CMD_T_ABORTED ||
+	    cmd->transport_state & CMD_T_STOP) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		/*
+		 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
+		 * release se_device->caw_sem obtained by sbc_compare_and_write()
+		 * since target_complete_ok_work() or target_complete_failure_work()
+		 * won't be called to invoke the normal CAW completion callbacks.
+		 */
+		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+			up(&dev->caw_sem);
+		}
+		complete_all(&cmd->t_transport_stop_comp);
+		return;
+	} else if (!success) {
+		INIT_WORK(&cmd->work, target_complete_failure_work);
+	} else {
+		INIT_WORK(&cmd->work, target_complete_ok_work);
+	}
+
+	cmd->t_state = TRANSPORT_COMPLETE;
+	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	queue_work(target_completion_wq, &cmd->work);
+}
+EXPORT_SYMBOL(target_complete_cmd);
+
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
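+	/*
+	 * Account for a shorter-than-expected transfer as SCSI underflow
+	 * residual before invoking the normal completion path.
+	 */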
+	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			cmd->residual_count += cmd->data_length - length;
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = cmd->data_length - length;
+		}
+
+		cmd->data_length = length;
+	}
+
+	target_complete_cmd(cmd, scsi_status);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
+
+static void target_add_to_state_list(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	if (!cmd->state_active) {
+		list_add_tail(&cmd->state_list, &dev->state_list);
+		cmd->state_active = true;
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+/*
+ * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
+ */
+static void transport_write_pending_qf(struct se_cmd *cmd);
+static void transport_complete_qf(struct se_cmd *cmd);
+
+void target_qf_do_work(struct work_struct *work)
+{
+	struct se_device *dev = container_of(work, struct se_device,
+					qf_work_queue);
+	LIST_HEAD(qf_cmd_list);
+	struct se_cmd *cmd, *cmd_tmp;
+
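+	/* Splice the queue-full list under the lock, then process unlocked. */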
+	spin_lock_irq(&dev->qf_cmd_lock);
+	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
+	spin_unlock_irq(&dev->qf_cmd_lock);
+
+	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
+		list_del(&cmd->se_qf_node);
+		atomic_dec_mb(&dev->dev_qf_count);
+
+		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
+			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
+			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
+			: "UNKNOWN");
+
+		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
+			transport_write_pending_qf(cmd);
+		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+			transport_complete_qf(cmd);
+	}
+}
+
+unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
+{
+	switch (cmd->data_direction) {
+	case DMA_NONE:
+		return "NONE";
+	case DMA_FROM_DEVICE:
+		return "READ";
+	case DMA_TO_DEVICE:
+		return "WRITE";
+	case DMA_BIDIRECTIONAL:
+		return "BIDI";
+	default:
+		break;
+	}
+
+	return "UNKNOWN";
+}
+
+void transport_dump_dev_state(
+	struct se_device *dev,
+	char *b,
+	int *bl)
+{
+	*bl += sprintf(b + *bl, "Status: ");
+	if (dev->export_count)
+		*bl += sprintf(b + *bl, "ACTIVATED");
+	else
+		*bl += sprintf(b + *bl, "DEACTIVATED");
+
+	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
+	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
+		dev->dev_attrib.block_size,
+		dev->dev_attrib.hw_max_sectors);
+	*bl += sprintf(b + *bl, "        ");
+}
+
+void transport_dump_vpd_proto_id(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
+
+	switch (vpd->protocol_identifier) {
+	case 0x00:
+		sprintf(buf+len, "Fibre Channel\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "Parallel SCSI\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SSA\n");
+		break;
+	case 0x30:
+		sprintf(buf+len, "IEEE 1394\n");
+		break;
+	case 0x40:
+		sprintf(buf+len, "SCSI Remote Direct Memory Access"
+				" Protocol\n");
+		break;
+	case 0x50:
+		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
+		break;
+	case 0x60:
+		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
+		break;
+	case 0x70:
+		sprintf(buf+len, "Automation/Drive Interface Transport"
+				" Protocol\n");
+		break;
+	case 0x80:
+		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n",
+				vpd->protocol_identifier);
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		pr_debug("%s", buf);
+}
+
+void
+transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * Check if the Protocol Identifier Valid (PIV) bit is set..
+	 *
+	 * from spc3r23.pdf section 7.5.1
+	 */
+	if (page_83[1] & 0x80) {
+		vpd->protocol_identifier = (page_83[0] & 0xf0);
+		vpd->protocol_identifier_set = 1;
+		transport_dump_vpd_proto_id(vpd, NULL, 0);
+	}
+}
+EXPORT_SYMBOL(transport_set_vpd_proto_id);
+
+int transport_dump_vpd_assoc(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Association: ");
+
+	switch (vpd->association) {
+	case 0x00:
+		sprintf(buf+len, "addressed logical unit\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "target port\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SCSI target device\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		pr_debug("%s", buf);
+
+	return ret;
+}
+
+int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identification association..
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 297
+	 */
+	vpd->association = (page_83[1] & 0x30);
+	return transport_dump_vpd_assoc(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_assoc);
+
+int transport_dump_vpd_ident_type(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Type: ");
+
+	switch (vpd->device_identifier_type) {
+	case 0x00:
+		sprintf(buf+len, "Vendor specific\n");
+		break;
+	case 0x01:
+		sprintf(buf+len, "T10 Vendor ID based\n");
+		break;
+	case 0x02:
+		sprintf(buf+len, "EUI-64 based\n");
+		break;
+	case 0x03:
+		sprintf(buf+len, "NAA\n");
+		break;
+	case 0x04:
+		sprintf(buf+len, "Relative target port identifier\n");
+		break;
+	case 0x08:
+		sprintf(buf+len, "SCSI name string\n");
+		break;
+	default:
+		sprintf(buf+len, "Unsupported: 0x%02x\n",
+				vpd->device_identifier_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (p_buf) {
+		if (p_buf_len < strlen(buf)+1)
+			return -EINVAL;
+		strncpy(p_buf, buf, p_buf_len);
+	} else {
+		pr_debug("%s", buf);
+	}
+
+	return ret;
+}
+
+int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identifier type..
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 298
+	 */
+	vpd->device_identifier_type = (page_83[1] & 0x0f);
+	return transport_dump_vpd_ident_type(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident_type);
+
+int transport_dump_vpd_ident(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		snprintf(buf, sizeof(buf),
+			"T10 VPD Binary Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x02: /* ASCII */
+		snprintf(buf, sizeof(buf),
+			"T10 VPD ASCII Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x03: /* UTF-8 */
+		snprintf(buf, sizeof(buf),
+			"T10 VPD UTF-8 Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	default:
+		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
+			" 0x%02x", vpd->device_identifier_code_set);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		pr_debug("%s", buf);
+
+	return ret;
+}
+
+int
+transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	static const char hex_str[] = "0123456789abcdef";
+	int j = 0, i = 4; /* offset to start of the identifier */
+
+	/*
+	 * The VPD Code Set (encoding)
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 296
+	 */
+	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
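+		/* Encode the identifier as hex ASCII: type nibble, then each byte. */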
+		vpd->device_identifier[j++] =
+				hex_str[vpd->device_identifier_type];
+		while (i < (4 + page_83[3])) {
+			vpd->device_identifier[j++] =
+				hex_str[(page_83[i] & 0xf0) >> 4];
+			vpd->device_identifier[j++] =
+				hex_str[page_83[i] & 0x0f];
+			i++;
+		}
+		break;
+	case 0x02: /* ASCII */
+	case 0x03: /* UTF-8 */
+		while (i < (4 + page_83[3]))
+			vpd->device_identifier[j++] = page_83[i++];
+		break;
+	default:
+		break;
+	}
+
+	return transport_dump_vpd_ident(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident);
+
+static sense_reason_t
+target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
+			       unsigned int size)
+{
+	u32 mtl;
+
+	if (!cmd->se_tfo->max_data_sg_nents)
+		return TCM_NO_SENSE;
+	/*
+	 * Check if se_cmd->data_length exceeds the fabric enforced maximum
+	 * transfer length, derived from the maximum SGL entries per I/O
+	 * descriptor.  If so, set SCF_UNDERFLOW_BIT + residual_count and
+	 * reduce the original cmd->data_length to that maximum, based on
+	 * single PAGE_SIZE entry scatter-lists.
+	 */
+	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
+	if (cmd->data_length > mtl) {
+		/*
+		 * If an existing CDB overflow is present, calculate new residual
+		 * based on CDB size minus fabric maximum transfer length.
+		 *
+		 * If an existing CDB underflow is present, calculate new residual
+		 * based on original cmd->data_length minus fabric maximum transfer
+		 * length.
+		 *
+		 * Otherwise, set the underflow residual based on cmd->data_length
+		 * minus fabric maximum transfer length.
+		 */
+		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+			cmd->residual_count = (size - mtl);
+		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			u32 orig_dl = size + cmd->residual_count;
+			cmd->residual_count = (orig_dl - mtl);
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = (cmd->data_length - mtl);
+		}
+		cmd->data_length = mtl;
+		/*
+		 * Reset sbc_check_prot() calculated protection payload
+		 * length based upon the new smaller MTL.
+		 */
+		if (cmd->prot_length) {
+			u32 sectors = (mtl / dev->dev_attrib.block_size);
+			cmd->prot_length = dev->prot_length * sectors;
+		}
+	}
+	return TCM_NO_SENSE;
+}
+
+sense_reason_t
+target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	if (cmd->unknown_data_length) {
+		cmd->data_length = size;
+	} else if (size != cmd->data_length) {
+		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
+			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
+			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+				cmd->data_length, size, cmd->t_task_cdb[0]);
+
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+				pr_err_ratelimited("Rejecting underflow/overflow"
+						   " for WRITE data CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
+			/*
+			 * Some fabric drivers like iscsi-target still expect to
+			 * always reject overflow writes.  Reject this case until
+			 * full fabric driver level support for overflow writes
+			 * is introduced tree-wide.
+			 */
+			if (size > cmd->data_length) {
+				pr_err_ratelimited("Rejecting overflow for"
+						   " WRITE control CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
+		}
+		/*
+		 * Reject READ_* or WRITE_* with overflow/underflow for
+		 * type SCF_SCSI_DATA_CDB.
+		 */
+		if (dev->dev_attrib.block_size != 512)  {
+			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
+				" CDB on non 512-byte sector setup subsystem"
+				" plugin: %s\n", dev->transport->name);
+			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+			return TCM_INVALID_CDB_FIELD;
+		}
+		/*
+		 * For the overflow case keep the existing fabric provided
+		 * ->data_length.  Otherwise for the underflow case, reset
+		 * ->data_length to the smaller SCSI expected data transfer
+		 * length.
+		 */
+		if (size > cmd->data_length) {
+			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+			cmd->residual_count = (size - cmd->data_length);
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = (cmd->data_length - size);
+			cmd->data_length = size;
+		}
+	}
+
+	return target_check_max_data_sg_nents(cmd, dev, size);
+}
+
+/*
+ * Used by fabric modules containing a local struct se_cmd within their
+ * fabric dependent per I/O descriptor.
+ *
+ * Preserves the value of @cmd->tag.
+ */
+void transport_init_se_cmd(
+	struct se_cmd *cmd,
+	const struct target_core_fabric_ops *tfo,
+	struct se_session *se_sess,
+	u32 data_length,
+	int data_direction,
+	int task_attr,
+	unsigned char *sense_buffer)
+{
+	INIT_LIST_HEAD(&cmd->se_delayed_node);
+	INIT_LIST_HEAD(&cmd->se_qf_node);
+	INIT_LIST_HEAD(&cmd->se_cmd_list);
+	INIT_LIST_HEAD(&cmd->state_list);
+	init_completion(&cmd->t_transport_stop_comp);
+	init_completion(&cmd->cmd_wait_comp);
+	init_completion(&cmd->task_stop_comp);
+	spin_lock_init(&cmd->t_state_lock);
+	kref_init(&cmd->cmd_kref);
+	cmd->transport_state = CMD_T_DEV_ACTIVE;
+
+	cmd->se_tfo = tfo;
+	cmd->se_sess = se_sess;
+	cmd->data_length = data_length;
+	cmd->data_direction = data_direction;
+	cmd->sam_task_attr = task_attr;
+	cmd->sense_buffer = sense_buffer;
+
+	cmd->state_active = false;
+}
+EXPORT_SYMBOL(transport_init_se_cmd);
+
+static sense_reason_t
+transport_check_alloc_task_attr(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	/*
+	 * Check if SAM Task Attribute emulation is enabled for this
+	 * struct se_device storage object
+	 */
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return 0;
+
+	if (cmd->sam_task_attr == TCM_ACA_TAG) {
+		pr_debug("SAM Task Attribute ACA"
+			" emulation is not supported\n");
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	return 0;
+}
+
+sense_reason_t
+target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
+{
+	struct se_device *dev = cmd->se_dev;
+	sense_reason_t ret;
+
+	/*
+	 * Ensure that the received CDB does not exceed the max (252 + 8) bytes
+	 * for VARIABLE_LENGTH_CMD
+	 */
+	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
+		pr_err("Received SCSI CDB with command_size: %d that"
+			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+		return TCM_INVALID_CDB_FIELD;
+	}
+	/*
+	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
+	 * allocate the additional extended CDB buffer now..  Otherwise
+	 * setup the pointer from __t_task_cdb to t_task_cdb.
+	 */
+	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
+		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
+						GFP_KERNEL);
+		if (!cmd->t_task_cdb) {
+			pr_err("Unable to allocate cmd->t_task_cdb"
+				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
+				scsi_command_size(cdb),
+				(unsigned long)sizeof(cmd->__t_task_cdb));
+			return TCM_OUT_OF_RESOURCES;
+		}
+	} else
+		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
+	/*
+	 * Copy the original CDB into cmd->
+	 */
+	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
+
+	trace_target_sequencer_start(cmd);
+
+	ret = dev->transport->parse_cdb(cmd);
+	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
+		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
+				    cmd->se_tfo->get_fabric_name(),
+				    cmd->se_sess->se_node_acl->initiatorname,
+				    cmd->t_task_cdb[0]);
+	if (ret)
+		return ret;
+
+	ret = transport_check_alloc_task_attr(cmd);
+	if (ret)
+		return ret;
+
+	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
+	return 0;
+}
+EXPORT_SYMBOL(target_setup_cmd_from_cdb);
+
+/*
+ * Used by fabric module frontends to queue tasks directly.
+ * May only be used from process context.
+ */
+int transport_handle_cdb_direct(
+	struct se_cmd *cmd)
+{
+	sense_reason_t ret;
+
+	if (!cmd->se_lun) {
+		dump_stack();
+		pr_err("cmd->se_lun is NULL\n");
+		return -EINVAL;
+	}
+	if (in_interrupt()) {
+		dump_stack();
+		pr_err("transport_generic_handle_cdb cannot be called"
+				" from interrupt context\n");
+		return -EINVAL;
+	}
+	/*
+	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
+	 * outstanding descriptors are handled correctly during shutdown via
+	 * transport_wait_for_tasks()
+	 *
+	 * Also, we don't take cmd->t_state_lock here as we only expect
+	 * this to be called for initial descriptor submission.
+	 */
+	cmd->t_state = TRANSPORT_NEW_CMD;
+	cmd->transport_state |= CMD_T_ACTIVE;
+
+	/*
+	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
+	 * so follow TRANSPORT_NEW_CMD processing thread context usage
+	 * and call transport_generic_request_failure() if necessary..
+	 */
+	ret = transport_generic_new_cmd(cmd);
+	if (ret)
+		transport_generic_request_failure(cmd, ret);
+	return 0;
+}
+EXPORT_SYMBOL(transport_handle_cdb_direct);
+
+sense_reason_t
+transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+{
+	if (!sgl || !sgl_count)
+		return 0;
+
+	/*
+	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+	 * scatterlists already have been set to follow what the fabric
+	 * passes for the original expected data transfer length.
+	 */
+	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+		pr_warn("Rejecting SCSI DATA overflow for fabric using"
+			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	cmd->t_data_sg = sgl;
+	cmd->t_data_nents = sgl_count;
+	cmd->t_bidi_data_sg = sgl_bidi;
+	cmd->t_bidi_data_nents = sgl_bidi_count;
+
+	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+	return 0;
+}
+
+/*
+ * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
+ * 			 se_cmd + use pre-allocated SGL memory.
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_tables
+ * @sgl: struct scatterlist memory for unidirectional mapping
+ * @sgl_count: scatterlist count for unidirectional mapping
+ * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
+ * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
+ *
+ * Task tags are supported if the caller has set @se_cmd->tag.
+ *
+ * Returns non-zero to signal active I/O shutdown failure.  All other
+ * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
+ * but still return zero here.
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ */
+int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
+		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
+		u32 data_length, int task_attr, int data_dir, int flags,
+		struct scatterlist *sgl, u32 sgl_count,
+		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+		struct scatterlist *sgl_prot, u32 sgl_prot_count)
+{
+	struct se_portal_group *se_tpg;
+	sense_reason_t rc;
+	int ret;
+
+	se_tpg = se_sess->se_tpg;
+	BUG_ON(!se_tpg);
+	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+	BUG_ON(in_interrupt());
+	/*
+	 * Initialize se_cmd for target operation.  From this point
+	 * exceptions are handled by sending exception status via
+	 * target_core_fabric_ops->queue_status() callback
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+				data_length, data_dir, task_attr, sense);
+	if (flags & TARGET_SCF_UNKNOWN_SIZE)
+		se_cmd->unknown_data_length = 1;
+	/*
+	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
+	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
+	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+	 * kref_put() to happen during fabric packet acknowledgement.
+	 */
+	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+	if (ret)
+		return ret;
+	/*
+	 * Signal bidirectional data payloads to target-core
+	 */
+	if (flags & TARGET_SCF_BIDI_OP)
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+	/*
+	 * Locate se_lun pointer and attach it to struct se_cmd
+	 */
+	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+	if (rc) {
+		transport_send_check_condition_and_sense(se_cmd, rc, 0);
+		target_put_sess_cmd(se_cmd);
+		return 0;
+	}
+
+	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+	if (rc != 0) {
+		transport_generic_request_failure(se_cmd, rc);
+		return 0;
+	}
+
+	/*
+	 * Save pointers for SGLs containing protection information,
+	 * if present.
+	 */
+	if (sgl_prot_count) {
+		se_cmd->t_prot_sg = sgl_prot;
+		se_cmd->t_prot_nents = sgl_prot_count;
+		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
+	}
+
+	/*
+	 * When a non zero sgl_count has been passed perform SGL passthrough
+	 * mapping for pre-allocated fabric memory instead of having target
+	 * core perform an internal SGL allocation..
+	 */
+	if (sgl_count != 0) {
+		BUG_ON(!sgl);
+
+		/*
+		 * A work-around for tcm_loop as some userspace code via
+		 * scsi-generic does not memset its associated read buffers,
+		 * so go ahead and do that here for type non-data CDBs.  Also
+		 * note that this is currently guaranteed to be a single SGL
+		 * for this case by target core in target_setup_cmd_from_cdb()
+		 * -> transport_generic_cmd_sequencer().
+		 */
+		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
+		     se_cmd->data_direction == DMA_FROM_DEVICE) {
+			unsigned char *buf = NULL;
+
+			if (sgl)
+				buf = kmap(sg_page(sgl)) + sgl->offset;
+
+			if (buf) {
+				memset(buf, 0, sgl->length);
+				kunmap(sg_page(sgl));
+			}
+		}
+
+		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+				sgl_bidi, sgl_bidi_count);
+		if (rc != 0) {
+			transport_generic_request_failure(se_cmd, rc);
+			return 0;
+		}
+	}
+
+	/*
+	 * Check if we need to delay processing because of ALUA
+	 * Active/NonOptimized primary access state..
+	 */
+	core_alua_check_nonop_delay(se_cmd);
+
+	transport_handle_cdb_direct(se_cmd);
+	return 0;
+}
+EXPORT_SYMBOL(target_submit_cmd_map_sgls);
+
+/*
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_tables
+ *
+ * Task tags are supported if the caller has set @se_cmd->tag.
+ *
+ * Returns non-zero to signal active I/O shutdown failure.  All other
+ * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
+ * but still return zero here.
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ *
+ * It also assumes internal target core SGL memory allocation.
+ */
+int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
+		u32 data_length, int task_attr, int data_dir, int flags)
+{
+	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
+			unpacked_lun, data_length, task_attr, data_dir,
+			flags, NULL, 0, NULL, 0, NULL, 0);
+}
+EXPORT_SYMBOL(target_submit_cmd);
+
+static void target_complete_tmr_failure(struct work_struct *work)
+{
+	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
+
+	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+
+	transport_cmd_check_stop_to_fabric(se_cmd);
+}
+
+/**
+ * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
+ *                     for TMR CDBs
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @fabric_context: fabric context for TMR req
+ * @tm_type: Type of TM request
+ * @gfp: gfp type for caller
+ * @tag: referenced task tag for TMR_ABORT_TASK
+ * @flags: submit cmd flags
+ *
+ * Callable from all contexts.
+ */
+int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
+		unsigned char *sense, u64 unpacked_lun,
+		void *fabric_tmr_ptr, unsigned char tm_type,
+		gfp_t gfp, unsigned int tag, int flags)
+{
+	struct se_portal_group *se_tpg;
+	int ret;
+
+	se_tpg = se_sess->se_tpg;
+	BUG_ON(!se_tpg);
+
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
+	/*
+	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
+	 * allocation failure.
+	 */
+	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
+	if (ret < 0)
+		return -ENOMEM;
+
+	if (tm_type == TMR_ABORT_TASK)
+		se_cmd->se_tmr_req->ref_task_tag = tag;
+
+	/* See target_submit_cmd for commentary */
+	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+	if (ret) {
+		core_tmr_release_req(se_cmd->se_tmr_req);
+		return ret;
+	}
+
+	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
+	if (ret) {
+		/*
+		 * For callback during failure handling, push this work off
+		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
+		 */
+		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
+		schedule_work(&se_cmd->work);
+		return 0;
+	}
+	transport_generic_handle_tmr(se_cmd);
+	return 0;
+}
+EXPORT_SYMBOL(target_submit_tmr);
+
+/*
+ * If the cmd is active, request it to be stopped and sleep until it
+ * has completed.
+ */
+bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
+	__releases(&cmd->t_state_lock)
+	__acquires(&cmd->t_state_lock)
+{
+	bool was_active = false;
+
+	if (cmd->transport_state & CMD_T_BUSY) {
+		cmd->transport_state |= CMD_T_REQUEST_STOP;
+		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+		pr_debug("cmd %p waiting to complete\n", cmd);
+		wait_for_completion(&cmd->task_stop_comp);
+		pr_debug("cmd %p stopped successfully\n", cmd);
+
+		spin_lock_irqsave(&cmd->t_state_lock, *flags);
+		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
+		cmd->transport_state &= ~CMD_T_BUSY;
+		was_active = true;
+	}
+
+	return was_active;
+}
+
+/*
+ * Handle SAM-esque emulation for generic transport request failures.
+ */
+void transport_generic_request_failure(struct se_cmd *cmd,
+		sense_reason_t sense_reason)
+{
+	int ret = 0, post_ret = 0;
+
+	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
+		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
+	pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
+		cmd->se_tfo->get_cmd_state(cmd),
+		cmd->t_state, sense_reason);
+	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
+		(cmd->transport_state & CMD_T_ACTIVE) != 0,
+		(cmd->transport_state & CMD_T_STOP) != 0,
+		(cmd->transport_state & CMD_T_SENT) != 0);
+
+	/*
+	 * For SAM Task Attribute emulation for failed struct se_cmd
+	 */
+	transport_complete_task_attr(cmd);
+	/*
+	 * Handle special case for COMPARE_AND_WRITE failure, where the
+	 * callback is expected to drop the per device ->caw_sem.
+	 */
+	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+	     cmd->transport_complete_callback)
+		cmd->transport_complete_callback(cmd, false, &post_ret);
+
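+	/*
+	 * Sense reasons in this first group are reported below via a normal
+	 * CHECK_CONDITION with a sense payload.
+	 */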
+	switch (sense_reason) {
+	case TCM_NON_EXISTENT_LUN:
+	case TCM_UNSUPPORTED_SCSI_OPCODE:
+	case TCM_INVALID_CDB_FIELD:
+	case TCM_INVALID_PARAMETER_LIST:
+	case TCM_PARAMETER_LIST_LENGTH_ERROR:
+	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+	case TCM_UNKNOWN_MODE_PAGE:
+	case TCM_WRITE_PROTECTED:
+	case TCM_ADDRESS_OUT_OF_RANGE:
+	case TCM_CHECK_CONDITION_ABORT_CMD:
+	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+	case TCM_CHECK_CONDITION_NOT_READY:
+	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+		break;
+	case TCM_OUT_OF_RESOURCES:
+		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		break;
+	case TCM_RESERVATION_CONFLICT:
+		/*
+		 * No SENSE Data payload for this case, set SCSI Status
+		 * and queue the response to $FABRIC_MOD.
+		 *
+		 * Uses linux/include/scsi/scsi.h SAM status codes defs
+		 */
+		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+		/*
+		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+		 * CONFLICT STATUS.
+		 *
+		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+		 */
+		if (cmd->se_sess &&
+		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
+					       cmd->orig_fe_lun, 0x2C,
+					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+		}
+		trace_target_cmd_complete(cmd);
+		ret = cmd->se_tfo->queue_status(cmd);
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			goto queue_full;
+		goto check_stop;
+	default:
+		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
+			cmd->t_task_cdb[0], sense_reason);
+		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		break;
+	}
+
+	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
+	if (ret == -EAGAIN || ret == -ENOMEM)
+		goto queue_full;
+
+check_stop:
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+	return;
+
+queue_full:
+	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+	transport_handle_queue_full(cmd, cmd->se_dev);
+}
+EXPORT_SYMBOL(transport_generic_request_failure);
+
+void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
+{
+	sense_reason_t ret;
+
+	if (!cmd->execute_cmd) {
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto err;
+	}
+	if (do_checks) {
+		/*
+		 * Check for an existing UNIT ATTENTION condition after
+		 * target_handle_task_attr() has done SAM task attr
+		 * checking, and may already have deferred execution
+		 * out to target_restart_delayed_cmds() context.
+		 */
+		ret = target_scsi3_ua_check(cmd);
+		if (ret)
+			goto err;
+
+		ret = target_alua_state_check(cmd);
+		if (ret)
+			goto err;
+
+		ret = target_check_reservation(cmd);
+		if (ret) {
+			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+			goto err;
+		}
+	}
+
+	ret = cmd->execute_cmd(cmd);
+	if (!ret)
+		return;
+err:
+	spin_lock_irq(&cmd->t_state_lock);
+	cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+	spin_unlock_irq(&cmd->t_state_lock);
+
+	transport_generic_request_failure(cmd, ret);
+}
+
+static int target_write_prot_action(struct se_cmd *cmd)
+{
+	u32 sectors;
+	/*
+	 * Perform WRITE_INSERT of PI using software emulation when backend
+	 * device has PI enabled, if the transport has not already generated
+	 * PI using hardware WRITE_INSERT offload.
+	 */
+	switch (cmd->prot_op) {
+	case TARGET_PROT_DOUT_INSERT:
+		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+			sbc_dif_generate(cmd);
+		break;
+	case TARGET_PROT_DOUT_STRIP:
+		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
+			break;
+
+		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
+		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
+					     sectors, 0, cmd->t_prot_sg, 0);
+		if (unlikely(cmd->pi_err)) {
+			spin_lock_irq(&cmd->t_state_lock);
+			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+			spin_unlock_irq(&cmd->t_state_lock);
+			transport_generic_request_failure(cmd, cmd->pi_err);
+			return -1;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
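+/*
+ * Returns true when the command has been placed on the device's delayed
+ * list and must not be executed yet; false when it may run immediately.
+ */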
+static bool target_handle_task_attr(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return false;
+
+	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
+
+	/*
+	 * HEAD_OF_QUEUE commands are never delayed: they may run ahead of
+	 * any SIMPLE or ORDERED commands already queued on the device.
+	 */
+	switch (cmd->sam_task_attr) {
+	case TCM_HEAD_TAG:
+		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
+			 cmd->t_task_cdb[0]);
+		return false;
+	case TCM_ORDERED_TAG:
+		atomic_inc_mb(&dev->dev_ordered_sync);
+
+		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
+			 cmd->t_task_cdb[0]);
+
+		/*
+		 * Execute an ORDERED command if no other older commands
+		 * exist that need to be completed first.
+		 */
+		if (!atomic_read(&dev->simple_cmds))
+			return false;
+		break;
+	default:
+		/*
+		 * For SIMPLE and UNTAGGED Task Attribute commands
+		 */
+		atomic_inc_mb(&dev->simple_cmds);
+		break;
+	}
+
+	if (atomic_read(&dev->dev_ordered_sync) == 0)
+		return false;
+
+	spin_lock(&dev->delayed_cmd_lock);
+	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+	spin_unlock(&dev->delayed_cmd_lock);
+
+	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
+		cmd->t_task_cdb[0], cmd->sam_task_attr);
+	return true;
+}
+
+static int __transport_check_aborted_status(struct se_cmd *, int);
+
+void target_execute_cmd(struct se_cmd *cmd)
+{
+	/*
+	 * Determine if frontend context caller is requesting the stopping of
+	 * this command for frontend exceptions.
+	 *
+	 * If the received CDB has already been aborted stop processing it here.
+	 */
+	spin_lock_irq(&cmd->t_state_lock);
+	if (__transport_check_aborted_status(cmd, 1)) {
+		spin_unlock_irq(&cmd->t_state_lock);
+		return;
+	}
+	if (cmd->transport_state & CMD_T_STOP) {
+		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+			__func__, __LINE__, cmd->tag);
+
+		spin_unlock_irq(&cmd->t_state_lock);
+		complete_all(&cmd->t_transport_stop_comp);
+		return;
+	}
+
+	cmd->t_state = TRANSPORT_PROCESSING;
+	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+	spin_unlock_irq(&cmd->t_state_lock);
+
+	if (target_write_prot_action(cmd))
+		return;
+
+	if (target_handle_task_attr(cmd)) {
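+		/*
+		 * The command was deferred to the delayed list; clear the
+		 * execution state bits until it is restarted.
+		 */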
+		spin_lock_irq(&cmd->t_state_lock);
+		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
+		spin_unlock_irq(&cmd->t_state_lock);
+		return;
+	}
+
+	__target_execute_cmd(cmd, true);
+}
+EXPORT_SYMBOL(target_execute_cmd);
+
+/*
+ * Process all commands up to the last received ORDERED task attribute which
+ * requires another blocking boundary
+ */
+static void target_restart_delayed_cmds(struct se_device *dev)
+{
+	for (;;) {
+		struct se_cmd *cmd;
+
+		spin_lock(&dev->delayed_cmd_lock);
+		if (list_empty(&dev->delayed_cmd_list)) {
+			spin_unlock(&dev->delayed_cmd_lock);
+			break;
+		}
+
+		cmd = list_entry(dev->delayed_cmd_list.next,
+				 struct se_cmd, se_delayed_node);
+		list_del(&cmd->se_delayed_node);
+		spin_unlock(&dev->delayed_cmd_lock);
+
+		__target_execute_cmd(cmd, true);
+
+		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
+			break;
+	}
+}
+
+/*
+ * Called from I/O completion to determine which dormant/delayed
+ * and ordered cmds need to have their tasks added to the execution queue.
+ */
+static void transport_complete_task_attr(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+		return;
+
+	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
+		goto restart;
+
+	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
+		atomic_dec_mb(&dev->simple_cmds);
+		dev->dev_cur_ordered_id++;
+		pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
+			 dev->dev_cur_ordered_id);
+	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
+		dev->dev_cur_ordered_id++;
+		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
+			 dev->dev_cur_ordered_id);
+	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
+		atomic_dec_mb(&dev->dev_ordered_sync);
+
+		dev->dev_cur_ordered_id++;
+		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
+			 dev->dev_cur_ordered_id);
+	}
+restart:
+	target_restart_delayed_cmds(dev);
+}
+
+static void transport_complete_qf(struct se_cmd *cmd)
+{
+	int ret = 0;
+
+	transport_complete_task_attr(cmd);
+
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+		trace_target_cmd_complete(cmd);
+		ret = cmd->se_tfo->queue_status(cmd);
+		goto out;
+	}
+
+	switch (cmd->data_direction) {
+	case DMA_FROM_DEVICE:
+		trace_target_cmd_complete(cmd);
+		ret = cmd->se_tfo->queue_data_in(cmd);
+		break;
+	case DMA_TO_DEVICE:
+		if (cmd->se_cmd_flags & SCF_BIDI) {
+			ret = cmd->se_tfo->queue_data_in(cmd);
+			break;
+		}
+		/* Fall through for DMA_TO_DEVICE */
+	case DMA_NONE:
+		trace_target_cmd_complete(cmd);
+		ret = cmd->se_tfo->queue_status(cmd);
+		break;
+	default:
+		break;
+	}
+
+out:
+	if (ret < 0) {
+		transport_handle_queue_full(cmd, cmd->se_dev);
+		return;
+	}
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_handle_queue_full(
+	struct se_cmd *cmd,
+	struct se_device *dev)
+{
+	spin_lock_irq(&dev->qf_cmd_lock);
+	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
+	atomic_inc_mb(&dev->dev_qf_count);
+	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
+
+	schedule_work(&cmd->se_dev->qf_work_queue);
+}
+
+static bool target_read_prot_action(struct se_cmd *cmd)
+{
+	switch (cmd->prot_op) {
+	case TARGET_PROT_DIN_STRIP:
+		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+			u32 sectors = cmd->data_length >>
+				  ilog2(cmd->se_dev->dev_attrib.block_size);
+
+			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
+						     sectors, 0, cmd->t_prot_sg,
+						     0);
+			if (cmd->pi_err)
+				return true;
+		}
+		break;
+	case TARGET_PROT_DIN_INSERT:
+		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
+			break;
+
+		sbc_dif_generate(cmd);
+		break;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+static void target_complete_ok_work(struct work_struct *work)
+{
+	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+	int ret;
+
+	/*
+	 * Check if we need to move delayed/dormant tasks from cmds on the
+	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
+	 * Attribute.
+	 */
+	transport_complete_task_attr(cmd);
+
+	/*
+	 * Check to schedule QUEUE_FULL work, or execute an existing
+	 * cmd->transport_qf_callback()
+	 */
+	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
+		schedule_work(&cmd->se_dev->qf_work_queue);
+
+	/*
+	 * Check if we need to send a sense buffer from
+	 * the struct se_cmd in question.
+	 */
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+		WARN_ON(!cmd->scsi_status);
+		ret = transport_send_check_condition_and_sense(
+					cmd, 0, 1);
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			goto queue_full;
+
+		transport_lun_remove_cmd(cmd);
+		transport_cmd_check_stop_to_fabric(cmd);
+		return;
+	}
+	/*
+	 * Check for a callback, used by, amongst other things,
+	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
+	 */
+	if (cmd->transport_complete_callback) {
+		sense_reason_t rc;
+		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+		bool zero_dl = !(cmd->data_length);
+		int post_ret = 0;
+
+		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+		if (!rc && !post_ret) {
+			if (caw && zero_dl)
+				goto queue_rsp;
+
+			return;
+		} else if (rc) {
+			ret = transport_send_check_condition_and_sense(cmd,
+						rc, 0);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
+	}
+
+queue_rsp:
+	switch (cmd->data_direction) {
+	case DMA_FROM_DEVICE:
+		atomic_long_add(cmd->data_length,
+				&cmd->se_lun->lun_stats.tx_data_octets);
+		/*
+		 * Perform READ_STRIP of PI using software emulation when
+		 * backend had PI enabled, if the transport will not be
+		 * performing hardware READ_STRIP offload.
+		 */
+		if (target_read_prot_action(cmd)) {
+			ret = transport_send_check_condition_and_sense(cmd,
+						cmd->pi_err, 0);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
+
+		trace_target_cmd_complete(cmd);
+		ret = cmd->se_tfo->queue_data_in(cmd);
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			goto queue_full;
+		break;
+	case DMA_TO_DEVICE:
+		atomic_long_add(cmd->data_length,
+				&cmd->se_lun->lun_stats.rx_data_octets);
+		/*
+		 * Check if we need to send READ payload for BIDI-COMMAND
+		 */
+		if (cmd->se_cmd_flags & SCF_BIDI) {
+			atomic_long_add(cmd->data_length,
+					&cmd->se_lun->lun_stats.tx_data_octets);
+			ret = cmd->se_tfo->queue_data_in(cmd);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+			break;
+		}
+		/* Fall through for DMA_TO_DEVICE */
+	case DMA_NONE:
+		trace_target_cmd_complete(cmd);
+		ret = cmd->se_tfo->queue_status(cmd);
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			goto queue_full;
+		break;
+	default:
+		break;
+	}
+
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+	return;
+
+queue_full:
+	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
+		" data_direction: %d\n", cmd, cmd->data_direction);
+	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+	transport_handle_queue_full(cmd, cmd->se_dev);
+}
+
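+/* Free the pages backing a scatterlist, then the scatterlist itself */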
+static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
+{
+	struct scatterlist *sg;
+	int count;
+
+	for_each_sg(sgl, sg, nents, count)
+		__free_page(sg_page(sg));
+
+	kfree(sgl);
+}
+
+static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+{
+	/*
+	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
+	 * emulation, and free + reset pointers if necessary.
+	 */
+	if (!cmd->t_data_sg_orig)
+		return;
+
+	kfree(cmd->t_data_sg);
+	cmd->t_data_sg = cmd->t_data_sg_orig;
+	cmd->t_data_sg_orig = NULL;
+	cmd->t_data_nents = cmd->t_data_nents_orig;
+	cmd->t_data_nents_orig = 0;
+}
+
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+		transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+		cmd->t_prot_sg = NULL;
+		cmd->t_prot_nents = 0;
+	}
+
+	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+		/*
+		 * Release special case READ buffer payload required for
+		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
+		 */
+		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+			transport_free_sgl(cmd->t_bidi_data_sg,
+					   cmd->t_bidi_data_nents);
+			cmd->t_bidi_data_sg = NULL;
+			cmd->t_bidi_data_nents = 0;
+		}
+		transport_reset_sgl_orig(cmd);
+		return;
+	}
+	transport_reset_sgl_orig(cmd);
+
+	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+	cmd->t_data_sg = NULL;
+	cmd->t_data_nents = 0;
+
+	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+	cmd->t_bidi_data_sg = NULL;
+	cmd->t_bidi_data_nents = 0;
+}
+
+/**
+ * transport_put_cmd - release a reference to a command
+ * @cmd:       command to release
+ *
+ * This routine releases our reference to the command and frees it if possible.
+ */
+static int transport_put_cmd(struct se_cmd *cmd)
+{
+	BUG_ON(!cmd->se_tfo);
+	/*
+	 * If this cmd has been setup with target_get_sess_cmd(), drop
+	 * the kref and call ->release_cmd() in kref callback.
+	 */
+	return target_put_sess_cmd(cmd);
+}
+
+void *transport_kmap_data_sg(struct se_cmd *cmd)
+{
+	struct scatterlist *sg = cmd->t_data_sg;
+	struct page **pages;
+	int i;
+
+	/*
+	 * We need to take into account a possible offset here for fabrics like
+	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
+	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
+	 */
+	if (!cmd->t_data_nents)
+		return NULL;
+
+	BUG_ON(!sg);
+	if (cmd->t_data_nents == 1)
+		return kmap(sg_page(sg)) + sg->offset;
+
+	/* >1 page. use vmap */
+	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	/* convert sg[] to pages[] */
+	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
+		pages[i] = sg_page(sg);
+	}
+
+	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
+	kfree(pages);
+	if (!cmd->t_data_vmap)
+		return NULL;
+
+	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
+}
+EXPORT_SYMBOL(transport_kmap_data_sg);
+
+void transport_kunmap_data_sg(struct se_cmd *cmd)
+{
+	if (!cmd->t_data_nents) {
+		return;
+	} else if (cmd->t_data_nents == 1) {
+		kunmap(sg_page(cmd->t_data_sg));
+		return;
+	}
+
+	vunmap(cmd->t_data_vmap);
+	cmd->t_data_vmap = NULL;
+}
+EXPORT_SYMBOL(transport_kunmap_data_sg);
+
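+/*
+ * Allocate a page-backed scatterlist able to hold @length bytes,
+ * optionally zeroing the pages.  On allocation failure any pages
+ * already obtained are released and -ENOMEM is returned.
+ */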
+int
+target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
+		 bool zero_page)
+{
+	struct scatterlist *sg;
+	struct page *page;
+	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
+	unsigned int nent;
+	int i = 0;
+
+	nent = DIV_ROUND_UP(length, PAGE_SIZE);
+	sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
+	if (!sg)
+		return -ENOMEM;
+
+	sg_init_table(sg, nent);
+
+	while (length) {
+		u32 page_len = min_t(u32, length, PAGE_SIZE);
+		page = alloc_page(GFP_KERNEL | zero_flag);
+		if (!page)
+			goto out;
+
+		sg_set_page(&sg[i], page, page_len, 0);
+		length -= page_len;
+		i++;
+	}
+	*sgl = sg;
+	*nents = nent;
+	return 0;
+
+out:
+	while (i > 0) {
+		i--;
+		__free_page(sg_page(&sg[i]));
+	}
+	kfree(sg);
+	return -ENOMEM;
+}
+
+/*
+ * Allocate any required resources to execute the command.  For writes we
+ * might not have the payload yet, so notify the fabric via a call to
+ * ->write_pending instead. Otherwise place it on the execution queue.
+ */
+sense_reason_t
+transport_generic_new_cmd(struct se_cmd *cmd)
+{
+	int ret = 0;
+	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+	if (cmd->prot_op != TARGET_PROT_NORMAL &&
+	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
+				       cmd->prot_length, true);
+		if (ret < 0)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	/*
+	 * Determine if the TCM fabric module has already allocated physical
+	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
+	 * beforehand.
+	 */
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+	    cmd->data_length) {
+
+		if ((cmd->se_cmd_flags & SCF_BIDI) ||
+		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+			u32 bidi_length;
+
+			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
+				bidi_length = cmd->t_task_nolb *
+					      cmd->se_dev->dev_attrib.block_size;
+			else
+				bidi_length = cmd->data_length;
+
+			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+					       &cmd->t_bidi_data_nents,
+					       bidi_length, zero_flag);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+
+		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
+				       cmd->data_length, zero_flag);
+		if (ret < 0)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+		    cmd->data_length) {
+		/*
+		 * Special case for COMPARE_AND_WRITE with fabrics
+		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
+		 */
+		u32 caw_length = cmd->t_task_nolb *
+				 cmd->se_dev->dev_attrib.block_size;
+
+		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+				       &cmd->t_bidi_data_nents,
+				       caw_length, zero_flag);
+		if (ret < 0)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * If this command is not a write we can execute it right here,
+	 * for write buffers we need to notify the fabric driver first
+	 * and let it call back once the write buffers are ready.
+	 */
+	target_add_to_state_list(cmd);
+	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
+		target_execute_cmd(cmd);
+		return 0;
+	}
+	transport_cmd_check_stop(cmd, false, true);
+
+	ret = cmd->se_tfo->write_pending(cmd);
+	if (ret == -EAGAIN || ret == -ENOMEM)
+		goto queue_full;
+
+	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
+	WARN_ON(ret);
+
+	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+queue_full:
+	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+	transport_handle_queue_full(cmd, cmd->se_dev);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_new_cmd);
+
+static void transport_write_pending_qf(struct se_cmd *cmd)
+{
+	int ret;
+
+	ret = cmd->se_tfo->write_pending(cmd);
+	if (ret == -EAGAIN || ret == -ENOMEM) {
+		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
+			 cmd);
+		transport_handle_queue_full(cmd, cmd->se_dev);
+	}
+}
+
+static bool
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
+			   unsigned long *flags);
+
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+{
+	int ret = 0;
+	bool aborted = false, tas = false;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+			target_wait_free_cmd(cmd, &aborted, &tas);
+
+		if (!aborted || tas)
+			ret = transport_put_cmd(cmd);
+	} else {
+		if (wait_for_tasks)
+			target_wait_free_cmd(cmd, &aborted, &tas);
+		/*
+		 * Handle WRITE failure case where transport_generic_new_cmd()
+		 * has already added se_cmd to state_list, but fabric has
+		 * failed command before I/O submission.
+		 */
+		if (cmd->state_active)
+			target_remove_from_state_list(cmd);
+
+		if (cmd->se_lun)
+			transport_lun_remove_cmd(cmd);
+
+		if (!aborted || tas)
+			ret = transport_put_cmd(cmd);
+	}
+	/*
+	 * If the task has been internally aborted due to TMR ABORT_TASK
+	 * or LUN_RESET, target_core_tmr.c is responsible for performing
+	 * the remaining calls to target_put_sess_cmd(), and not the
+	 * callers of this function.
+	 */
+	if (aborted) {
+		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
+		wait_for_completion(&cmd->cmd_wait_comp);
+		cmd->se_tfo->release_cmd(cmd);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(transport_generic_free_cmd);
+
+/* target_get_sess_cmd - Add command to active ->sess_cmd_list
+ * @se_cmd:	command descriptor to add
+ * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
+ */
+int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+{
+	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
+	int ret = 0;
+
+	/*
+	 * Add a second kref if the fabric caller is expecting to handle
+	 * fabric acknowledgement that requires two target_put_sess_cmd()
+	 * invocations before se_cmd descriptor release.
+	 */
+	if (ack_kref) {
+		kref_get(&se_cmd->cmd_kref);
+		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+	}
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	if (se_sess->sess_tearing_down) {
+		ret = -ESHUTDOWN;
+		goto out;
+	}
+	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+out:
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+	if (ret && ack_kref)
+		target_put_sess_cmd(se_cmd);
+
+	return ret;
+}
+EXPORT_SYMBOL(target_get_sess_cmd);
+
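+/* Release data pages, the TMR request, and any oversized CDB allocation */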
+static void target_free_cmd_mem(struct se_cmd *cmd)
+{
+	transport_free_pages(cmd);
+
+	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+		core_tmr_release_req(cmd->se_tmr_req);
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
+		kfree(cmd->t_task_cdb);
+}
+
+static void target_release_cmd_kref(struct kref *kref)
+{
+	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
+	bool fabric_stop;
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+
+	spin_lock(&se_cmd->t_state_lock);
+	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
+		      (se_cmd->transport_state & CMD_T_ABORTED);
+	spin_unlock(&se_cmd->t_state_lock);
+
+	if (se_cmd->cmd_wait_set || fabric_stop) {
+		list_del_init(&se_cmd->se_cmd_list);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		target_free_cmd_mem(se_cmd);
+		complete(&se_cmd->cmd_wait_comp);
+		return;
+	}
+	list_del_init(&se_cmd->se_cmd_list);
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+	target_free_cmd_mem(se_cmd);
+	se_cmd->se_tfo->release_cmd(se_cmd);
+}
+
+/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+ * @se_cmd:	command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_cmd *se_cmd)
+{
+	struct se_session *se_sess = se_cmd->se_sess;
+
+	if (!se_sess) {
+		target_free_cmd_mem(se_cmd);
+		se_cmd->se_tfo->release_cmd(se_cmd);
+		return 1;
+	}
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+}
+EXPORT_SYMBOL(target_put_sess_cmd);
+
+/* target_sess_cmd_list_set_waiting - Flag all commands in
+ *         sess_cmd_list to complete cmd_wait_comp.  Set
+ *         sess_tearing_down so no more commands are queued.
+ * @se_sess:	session to flag
+ */
+void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+{
+	struct se_cmd *se_cmd;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	if (se_sess->sess_tearing_down) {
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		return;
+	}
+	se_sess->sess_tearing_down = 1;
+	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+
+	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+		rc = kref_get_unless_zero(&se_cmd->cmd_kref);
+		if (rc) {
+			se_cmd->cmd_wait_set = 1;
+			spin_lock(&se_cmd->t_state_lock);
+			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+			spin_unlock(&se_cmd->t_state_lock);
+		}
+	}
+
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+}
+EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
+
+/* target_wait_for_sess_cmds - Wait for outstanding descriptors
+ * @se_sess:    session to wait for active I/O
+ */
+void target_wait_for_sess_cmds(struct se_session *se_sess)
+{
+	struct se_cmd *se_cmd, *tmp_cmd;
+	unsigned long flags;
+	bool tas;
+
+	list_for_each_entry_safe(se_cmd, tmp_cmd,
+				&se_sess->sess_wait_list, se_cmd_list) {
+		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+			" %d\n", se_cmd, se_cmd->t_state,
+			se_cmd->se_tfo->get_cmd_state(se_cmd));
+
+		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+		tas = (se_cmd->transport_state & CMD_T_TAS);
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+		if (!target_put_sess_cmd(se_cmd)) {
+			if (tas)
+				target_put_sess_cmd(se_cmd);
+		}
+
+		wait_for_completion(&se_cmd->cmd_wait_comp);
+		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+			" fabric state: %d\n", se_cmd, se_cmd->t_state,
+			se_cmd->se_tfo->get_cmd_state(se_cmd));
+
+		se_cmd->se_tfo->release_cmd(se_cmd);
+	}
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	WARN_ON(!list_empty(&se_sess->sess_cmd_list));
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+}
+EXPORT_SYMBOL(target_wait_for_sess_cmds);
+
+static void target_lun_confirm(struct percpu_ref *ref)
+{
+	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+	complete(&lun->lun_ref_comp);
+}
+
+void transport_clear_lun_ref(struct se_lun *lun)
+{
+	/*
+	 * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
+	 * the initial reference and schedule confirm kill to be
+	 * executed after one full RCU grace period has completed.
+	 */
+	percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
+	/*
+	 * The first completion waits for percpu_ref_switch_to_atomic_rcu()
+	 * to call target_lun_confirm after lun->lun_ref has been marked
+	 * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
+	 * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
+	 * fails for all new incoming I/O.
+	 */
+	wait_for_completion(&lun->lun_ref_comp);
+	/*
+	 * The second completion waits for percpu_ref_put_many() to
+	 * invoke ->release() after lun->lun_ref has switched to
+	 * atomic_t mode, and lun->lun_ref.count has reached zero.
+	 *
+	 * At this point all target-core lun->lun_ref references have
+	 * been dropped via transport_lun_remove_cmd(), and it's safe
+	 * to proceed with the remaining LUN shutdown.
+	 */
+	wait_for_completion(&lun->lun_shutdown_comp);
+}
+
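+/*
+ * Helper for transport_wait_for_tasks() and target_wait_free_cmd().
+ * Called with t_state_lock held and interrupts disabled; drops and
+ * re-takes the lock around the wait.  Sets *aborted and *tas from
+ * cmd->transport_state, and returns true only when CMD_T_STOP was set
+ * and t_transport_stop_comp has been waited upon.
+ */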
+static bool
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+			   bool *aborted, bool *tas, unsigned long *flags)
+	__releases(&cmd->t_state_lock)
+	__acquires(&cmd->t_state_lock)
+{
+
+	assert_spin_locked(&cmd->t_state_lock);
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (fabric_stop)
+		cmd->transport_state |= CMD_T_FABRIC_STOP;
+
+	if (cmd->transport_state & CMD_T_ABORTED)
+		*aborted = true;
+
+	if (cmd->transport_state & CMD_T_TAS)
+		*tas = true;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+		return false;
+
+	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+		return false;
+
+	if (!(cmd->transport_state & CMD_T_ACTIVE))
+		return false;
+
+	if (fabric_stop && *aborted)
+		return false;
+
+	cmd->transport_state |= CMD_T_STOP;
+
+	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
+		 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
+		 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+
+	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+	wait_for_completion(&cmd->t_transport_stop_comp);
+
+	spin_lock_irqsave(&cmd->t_state_lock, *flags);
+	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+
+	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
+		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
+
+	return true;
+}
+
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd:	command to wait on
+ *
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
+ */
+bool transport_wait_for_tasks(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	bool ret, aborted = false, tas = false;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(transport_wait_for_tasks);
+
+struct sense_info {
+	u8 key;
+	u8 asc;
+	u8 ascq;
+	bool add_sector_info;
+};
+
+static const struct sense_info sense_info_table[] = {
+	[TCM_NO_SENSE] = {
+		.key = NOT_READY
+	},
+	[TCM_NON_EXISTENT_LUN] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x25, /* LOGICAL UNIT NOT SUPPORTED */
+	},
+	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
+	},
+	[TCM_SECTOR_COUNT_TOO_MANY] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
+	},
+	[TCM_UNKNOWN_MODE_PAGE] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x24, /* INVALID FIELD IN CDB */
+	},
+	[TCM_CHECK_CONDITION_ABORT_CMD] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
+		.ascq = 0x03,
+	},
+	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x0c, /* WRITE ERROR */
+		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
+	},
+	[TCM_INVALID_CDB_FIELD] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x24, /* INVALID FIELD IN CDB */
+	},
+	[TCM_INVALID_PARAMETER_LIST] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
+	},
+	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
+	},
+	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x0c, /* WRITE ERROR */
+		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
+	},
+	[TCM_SERVICE_CRC_ERROR] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
+		.ascq = 0x05, /* N/A */
+	},
+	[TCM_SNACK_REJECTED] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x11, /* READ ERROR */
+		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
+	},
+	[TCM_WRITE_PROTECTED] = {
+		.key = DATA_PROTECT,
+		.asc = 0x27, /* WRITE PROTECTED */
+	},
+	[TCM_ADDRESS_OUT_OF_RANGE] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+	},
+	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
+		.key = UNIT_ATTENTION,
+	},
+	[TCM_CHECK_CONDITION_NOT_READY] = {
+		.key = NOT_READY,
+	},
+	[TCM_MISCOMPARE_VERIFY] = {
+		.key = MISCOMPARE,
+		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
+		.ascq = 0x00,
+	},
+	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x10,
+		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
+		.add_sector_info = true,
+	},
+	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x10,
+		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+		.add_sector_info = true,
+	},
+	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x10,
+		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+		.add_sector_info = true,
+	},
+	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+		.key = COPY_ABORTED,
+		.asc = 0x0d,
+		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+	},
+	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
+		/*
+		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
+		 * Solaris initiators.  Returning NOT READY instead means the
+		 * operations will be retried a finite number of times and we
+		 * can survive intermittent errors.
+		 */
+		.key = NOT_READY,
+		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
+	},
+};
+
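+/*
+ * Translate a sense_reason_t into SCSI sense data in cmd->sense_buffer
+ * using sense_info_table, falling back to LOGICAL UNIT COMMUNICATION
+ * FAILURE for unrecognized reasons.  For UNIT ATTENTION the ASC/ASCQ
+ * pair comes from the pending UA; table entries without an ASC use the
+ * values already stored in cmd->scsi_asc/scsi_ascq.
+ */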
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
+{
+	const struct sense_info *si;
+	u8 *buffer = cmd->sense_buffer;
+	int r = (__force int)reason;
+	u8 asc, ascq;
+	bool desc_format = target_sense_desc_format(cmd->se_dev);
+
+	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
+		si = &sense_info_table[r];
+	else
+		si = &sense_info_table[(__force int)
+				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
+
+	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
+		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+		WARN_ON_ONCE(asc == 0);
+	} else if (si->asc == 0) {
+		WARN_ON_ONCE(cmd->scsi_asc == 0);
+		asc = cmd->scsi_asc;
+		ascq = cmd->scsi_ascq;
+	} else {
+		asc = si->asc;
+		ascq = si->ascq;
+	}
+
+	scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
+	if (si->add_sector_info)
+		return scsi_set_sense_information(buffer,
+						  cmd->scsi_sense_length,
+						  cmd->bad_sector);
+
+	return 0;
+}
+
+int
+transport_send_check_condition_and_sense(struct se_cmd *cmd,
+		sense_reason_t reason, int from_transport)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return 0;
+	}
+	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	if (!from_transport) {
+		int rc;
+
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+		cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
+		rc = translate_sense_reason(cmd, reason);
+		if (rc)
+			return rc;
+	}
+
+	trace_target_cmd_complete(cmd);
+	return cmd->se_tfo->queue_status(cmd);
+}
+EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+
+static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+	__releases(&cmd->t_state_lock)
+	__acquires(&cmd->t_state_lock)
+{
+	assert_spin_locked(&cmd->t_state_lock);
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (!(cmd->transport_state & CMD_T_ABORTED))
+		return 0;
+	/*
+	 * If cmd has been aborted but either no status is to be sent or it has
+	 * already been sent, just return
+	 */
+	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
+		if (send_status)
+			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+		return 1;
+	}
+
+	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
+		" 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
+
+	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+	trace_target_cmd_complete(cmd);
+
+	spin_unlock_irq(&cmd->t_state_lock);
+	cmd->se_tfo->queue_status(cmd);
+	spin_lock_irq(&cmd->t_state_lock);
+
+	return 1;
+}
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+	int ret;
+
+	spin_lock_irq(&cmd->t_state_lock);
+	ret = __transport_check_aborted_status(cmd, send_status);
+	spin_unlock_irq(&cmd->t_state_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(transport_check_aborted_status);
+
+void transport_send_task_abort(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	/*
+	 * If there are still expected incoming fabric WRITEs, we wait
+	 * until they have completed before sending a TASK_ABORTED
+	 * response.  This response with TASK_ABORTED status will be
+	 * queued back to fabric module by transport_check_aborted_status().
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
+				spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+				goto send_abort;
+			}
+			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+			return;
+		}
+	}
+send_abort:
+	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+
+	transport_lun_remove_cmd(cmd);
+
+	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
+		 cmd->t_task_cdb[0], cmd->tag);
+
+	trace_target_cmd_complete(cmd);
+	cmd->se_tfo->queue_status(cmd);
+}
+
+static void target_tmr_work(struct work_struct *work)
+{
+	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+	struct se_device *dev = cmd->se_dev;
+	struct se_tmr_req *tmr = cmd->se_tmr_req;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		tmr->response = TMR_FUNCTION_REJECTED;
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		goto check_stop;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	switch (tmr->function) {
+	case TMR_ABORT_TASK:
+		core_tmr_abort_task(dev, tmr, cmd->se_sess);
+		break;
+	case TMR_ABORT_TASK_SET:
+	case TMR_CLEAR_ACA:
+	case TMR_CLEAR_TASK_SET:
+		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+		break;
+	case TMR_LUN_RESET:
+		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
+		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
+					 TMR_FUNCTION_REJECTED;
+		if (tmr->response == TMR_FUNCTION_COMPLETE) {
+			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
+					       cmd->orig_fe_lun, 0x29,
+					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
+		}
+		break;
+	case TMR_TARGET_WARM_RESET:
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	case TMR_TARGET_COLD_RESET:
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	default:
+		pr_err("Uknown TMR function: 0x%02x.\n",
+				tmr->function);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	}
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		goto check_stop;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	cmd->se_tfo->queue_tm_rsp(cmd);
+
+check_stop:
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
+int transport_generic_handle_tmr(
+	struct se_cmd *cmd)
+{
+	unsigned long flags;
+	bool aborted = false;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		aborted = true;
+	} else {
+		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+		cmd->transport_state |= CMD_T_ACTIVE;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	if (aborted) {
+		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+			"ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+			cmd->se_tmr_req->ref_task_tag, cmd->tag);
+		transport_cmd_check_stop_to_fabric(cmd);
+		return 0;
+	}
+
+	INIT_WORK(&cmd->work, target_tmr_work);
+	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+bool
+target_check_wce(struct se_device *dev)
+{
+	bool wce = false;
+
+	if (dev->transport->get_write_cache)
+		wce = dev->transport->get_write_cache(dev);
+	else if (dev->dev_attrib.emulate_write_cache > 0)
+		wce = true;
+
+	return wce;
+}
+
+bool
+target_check_fua(struct se_device *dev)
+{
+	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
+}
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
new file mode 100644
index 0000000..be25eb8
--- /dev/null
+++ b/drivers/target/target_core_ua.c
@@ -0,0 +1,337 @@
+/*******************************************************************************
+ * Filename: target_core_ua.c
+ *
+ * This file contains logic for SPC-3 Unit Attention emulation
+ *
+ * (c) Copyright 2009-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi_proto.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+sense_reason_t
+target_scsi3_ua_check(struct se_cmd *cmd)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+
+	if (!sess)
+		return 0;
+
+	nacl = sess->se_node_acl;
+	if (!nacl)
+		return 0;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return 0;
+	}
+	if (!atomic_read(&deve->ua_count)) {
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+	/*
+	 * From sam4r14, section 5.14 Unit attention condition:
+	 *
+	 * a) if an INQUIRY command enters the enabled command state, the
+	 *    device server shall process the INQUIRY command and shall neither
+	 *    report nor clear any unit attention condition;
+	 * b) if a REPORT LUNS command enters the enabled command state, the
+	 *    device server shall process the REPORT LUNS command and shall not
+	 *    report any unit attention condition;
+	 * e) if a REQUEST SENSE command enters the enabled command state while
+	 *    a unit attention condition exists for the SCSI initiator port
+	 *    associated with the I_T nexus on which the REQUEST SENSE command
+	 *    was received, then the device server shall process the command
+	 *    and either:
+	 */
+	switch (cmd->t_task_cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		return 0;
+	default:
+		return TCM_CHECK_CONDITION_UNIT_ATTENTION;
+	}
+}
+
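+/*
+ * Queue a new UNIT ATTENTION condition (asc/ascq) on the deve->ua_list,
+ * dropping duplicates and ordering reset-family 29h codes ahead of
+ * lower priority conditions per sam4r14, section 5.14.
+ */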
+int core_scsi3_ua_allocate(
+	struct se_dev_entry *deve,
+	u8 asc,
+	u8 ascq)
+{
+	struct se_ua *ua, *ua_p, *ua_tmp;
+
+	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+	if (!ua) {
+		pr_err("Unable to allocate struct se_ua\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&ua->ua_nacl_list);
+
+	ua->ua_asc = asc;
+	ua->ua_ascq = ascq;
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * Do not report the same UNIT ATTENTION twice.
+		 */
+		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
+			spin_unlock(&deve->ua_lock);
+			kmem_cache_free(se_ua_cache, ua);
+			return 0;
+		}
+		/*
+		 * Attach the highest priority Unit Attention to
+		 * the head of the list following sam4r14,
+		 * Section 5.14 Unit Attention Condition:
+		 *
+		 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
+		 * POWER ON OCCURRED or
+		 * DEVICE INTERNAL RESET
+		 * SCSI BUS RESET OCCURRED or
+		 * MICROCODE HAS BEEN CHANGED or
+		 * protocol specific
+		 * BUS DEVICE RESET FUNCTION OCCURRED
+		 * I_T NEXUS LOSS OCCURRED
+		 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
+		 * all others                                    Lowest
+		 *
+		 * Each of the ASCQ codes listed above are defined in
+		 * the 29h ASC family, see spc4r17 Table D.1
+		 */
+		if (ua_p->ua_asc == 0x29) {
+			if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+						&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else if (ua_p->ua_asc == 0x2a) {
+			/*
+			 * Incoming Family 29h ASCQ codes will override
+			 * Family 2Ah ASCQ codes for Unit Attention condition.
+			 */
+			if ((asc == 0x29) || (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+					&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else
+			list_add_tail(&ua->ua_nacl_list,
+				&deve->ua_list);
+		spin_unlock(&deve->ua_lock);
+
+		atomic_inc_mb(&deve->ua_count);
+		return 0;
+	}
+	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
+	spin_unlock(&deve->ua_lock);
+
+	pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
+		" 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
+		asc, ascq);
+
+	atomic_inc_mb(&deve->ua_count);
+	return 0;
+}
+
+void target_ua_allocate_lun(struct se_node_acl *nacl,
+			    u32 unpacked_lun, u8 asc, u8 ascq)
+{
+	struct se_dev_entry *deve;
+
+	if (!nacl)
+		return;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, unpacked_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return;
+	}
+
+	core_scsi3_ua_allocate(deve, asc, ascq);
+	rcu_read_unlock();
+}
+
+void core_scsi3_ua_release_all(
+	struct se_dev_entry *deve)
+{
+	struct se_ua *ua, *ua_p;
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec_mb(&deve->ua_count);
+	}
+	spin_unlock(&deve->ua_lock);
+}
+
+void core_scsi3_ua_for_check_condition(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!sess)
+		return;
+
+	nacl = sess->se_node_acl;
+	if (!nacl)
+		return;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return;
+	}
+	if (!atomic_read(&deve->ua_count)) {
+		rcu_read_unlock();
+		return;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
+	 * sense data for the received CDB.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * For ua_intlck_ctrl code not equal to 00b, only report the
+		 * highest priority UNIT_ATTENTION and ASC/ASCQ without
+		 * clearing it.
+		 */
+		if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			break;
+		}
+		/*
+		 * Otherwise for the default 00b, release the UNIT ATTENTION
+		 * condition.  Return the ASC/ASCQ of the highest priority UA
+		 * (head of the list) in the outgoing CHECK_CONDITION + sense.
+		 */
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec_mb(&deve->ua_count);
+	}
+	spin_unlock(&deve->ua_lock);
+	rcu_read_unlock();
+
+	pr_debug("[%s]: %s UNIT ATTENTION condition with"
+		" INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
+		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
+		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+		"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
+		cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
+}
+
+int core_scsi3_ua_clear_for_request_sense(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!sess)
+		return -EINVAL;
+
+	nacl = sess->se_node_acl;
+	if (!nacl)
+		return -EINVAL;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	if (!atomic_read(&deve->ua_count)) {
+		rcu_read_unlock();
+		return -EPERM;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list.  The First (and hence highest priority)
+	 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
+	 * matching struct se_lun.
+	 *
+	 * Once the returning ASC/ASCQ values are set, we go ahead and
+	 * release all of the Unit Attention conditions for the associated
+	 * struct se_lun.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec_mb(&deve->ua_count);
+	}
+	spin_unlock(&deve->ua_lock);
+	rcu_read_unlock();
+
+	pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
+		" LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
+		" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		cmd->orig_fe_lun, *asc, *ascq);
+
+	return (head) ? -EPERM : 0;
+}
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
new file mode 100644
index 0000000..bd6e78b
--- /dev/null
+++ b/drivers/target/target_core_ua.h
@@ -0,0 +1,41 @@
+#ifndef TARGET_CORE_UA_H
+#define TARGET_CORE_UA_H
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED	0x00
+#define ASCQ_29H_POWER_ON_OCCURRED				0x01
+#define ASCQ_29H_SCSI_BUS_RESET_OCCURED				0x02
+#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED		0x03
+#define ASCQ_29H_DEVICE_INTERNAL_RESET				0x04
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED	0x05
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD		0x06
+#define ASCQ_29H_NEXUS_LOSS_OCCURRED				0x07
+
+#define ASCQ_2AH_PARAMETERS_CHANGED				0x00
+#define ASCQ_2AH_MODE_PARAMETERS_CHANGED			0x01
+#define ASCQ_2AH_LOG_PARAMETERS_CHANGED				0x02
+#define ASCQ_2AH_RESERVATIONS_PREEMPTED				0x03
+#define ASCQ_2AH_RESERVATIONS_RELEASED				0x04
+#define ASCQ_2AH_REGISTRATIONS_PREEMPTED			0x05
+#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED		0x06
+#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_PRIORITY_CHANGED				0x08
+
+#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS		0x09
+
+#define ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED			0x03
+#define ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED			0x0E
+
+extern struct kmem_cache *se_ua_cache;
+
+extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
+extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
+extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
+extern void core_scsi3_ua_release_all(struct se_dev_entry *);
+extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
+						u8 *, u8 *);
+
+#endif /* TARGET_CORE_UA_H */
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
new file mode 100644
index 0000000..a7d30e8
--- /dev/null
+++ b/drivers/target/target_core_user.c
@@ -0,0 +1,1171 @@
+/*
+ * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
+ * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2015 Arrikto, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/parser.h>
+#include <linux/vmalloc.h>
+#include <linux/uio_driver.h>
+#include <linux/stringify.h>
+#include <net/genetlink.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_backend.h>
+
+#include <linux/target_core_user.h>
+
+/*
+ * Define a shared-memory interface for LIO to pass SCSI commands and
+ * data to userspace for processing. This allows backends that would be
+ * too complex to support in-kernel to be implemented in userspace.
+ *
+ * It uses the UIO framework to do a lot of the device-creation and
+ * introspection work for us.
+ *
+ * See the .h file for how the ring is laid out. Note that while the
+ * command ring is defined, the particulars of the data area are
+ * not. Offset values in the command entry point to other locations
+ * internal to the mmap()ed area. There is separate space outside the
+ * command ring for data buffers. This leaves maximum flexibility for
+ * moving buffer allocations, or even page flipping or other
+ * allocation techniques, without altering the command ring layout.
+ *
+ * SECURITY:
+ * The user process must be assumed to be malicious. There's no way to
+ * prevent it breaking the command ring protocol if it wants, but in
+ * order to prevent other issues we must only ever read *data* from
+ * the shared memory area, not offsets or sizes. This applies to
+ * command ring entries as well as the mailbox. Extra code needed for
+ * this may have a 'UAM' comment.
+ */
+
+
+#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
+
+#define CMDR_SIZE (16 * 4096)
+#define DATA_SIZE (257 * 4096)
+
+#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
+
+static struct device *tcmu_root_device;
+
+struct tcmu_hba {
+	u32 host_id;
+};
+
+#define TCMU_CONFIG_LEN 256
+
+struct tcmu_dev {
+	struct se_device se_dev;
+
+	char *name;
+	struct se_hba *hba;
+
+#define TCMU_DEV_BIT_OPEN 0
+#define TCMU_DEV_BIT_BROKEN 1
+	unsigned long flags;
+
+	struct uio_info uio_info;
+
+	struct tcmu_mailbox *mb_addr;
+	size_t dev_size;
+	u32 cmdr_size;
+	u32 cmdr_last_cleaned;
+	/* Offset of data ring from start of mb */
+	size_t data_off;
+	size_t data_size;
+	/* Ring head + tail values. */
+	/* Must add data_off and mb_addr to get the address */
+	size_t data_head;
+	size_t data_tail;
+
+	wait_queue_head_t wait_cmdr;
+	/* TODO should this be a mutex? */
+	spinlock_t cmdr_lock;
+
+	struct idr commands;
+	spinlock_t commands_lock;
+
+	struct timer_list timeout;
+
+	char dev_config[TCMU_CONFIG_LEN];
+};
+
+#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
+
+#define CMDR_OFF sizeof(struct tcmu_mailbox)
+
+struct tcmu_cmd {
+	struct se_cmd *se_cmd;
+	struct tcmu_dev *tcmu_dev;
+
+	uint16_t cmd_id;
+
+	/*
+	 * Can't use se_cmd->data_length when cleaning up expired cmds,
+	 * because if the cmd has been completed then accessing se_cmd is
+	 * off limits.
+	 */
+	size_t data_length;
+
+	unsigned long deadline;
+
+#define TCMU_CMD_BIT_EXPIRED 0
+	unsigned long flags;
+};
+
+static struct kmem_cache *tcmu_cmd_cache;
+
+/* multicast group */
+enum tcmu_multicast_groups {
+	TCMU_MCGRP_CONFIG,
+};
+
+static const struct genl_multicast_group tcmu_mcgrps[] = {
+	[TCMU_MCGRP_CONFIG] = { .name = "config", },
+};
+
+/* Our generic netlink family */
+static struct genl_family tcmu_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = 0,
+	.name = "TCM-USER",
+	.version = 1,
+	.maxattr = TCMU_ATTR_MAX,
+	.mcgrps = tcmu_mcgrps,
+	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
+};
+
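+/*
+ * Allocate a tcmu_cmd for @se_cmd and assign it a 16-bit cmd_id from the
+ * per-device IDR.  The id is written into the ring entry header and used
+ * to look the command up again when userspace completes it.
+ */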
+static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
+{
+	struct se_device *se_dev = se_cmd->se_dev;
+	struct tcmu_dev *udev = TCMU_DEV(se_dev);
+	struct tcmu_cmd *tcmu_cmd;
+	int cmd_id;
+
+	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
+	if (!tcmu_cmd)
+		return NULL;
+
+	tcmu_cmd->se_cmd = se_cmd;
+	tcmu_cmd->tcmu_dev = udev;
+	tcmu_cmd->data_length = se_cmd->data_length;
+
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+		tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
+	}
+
+	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&udev->commands_lock);
+	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
+		USHRT_MAX, GFP_NOWAIT);
+	spin_unlock_irq(&udev->commands_lock);
+	idr_preload_end();
+
+	if (cmd_id < 0) {
+		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+		return NULL;
+	}
+	tcmu_cmd->cmd_id = cmd_id;
+
+	return tcmu_cmd;
+}
+
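+/*
+ * Flush the CPU dcache for a possibly unaligned range one page at a
+ * time, so userspace sees coherent ring contents on architectures
+ * without coherent caches.
+ */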
+static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
+{
+	unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;
+
+	size = round_up(size+offset, PAGE_SIZE);
+	vaddr -= offset;
+
+	while (size) {
+		flush_dcache_page(virt_to_page(vaddr));
+		size -= PAGE_SIZE;
+		vaddr += PAGE_SIZE;	/* advance, or the same page is flushed repeatedly */
+	}
+}
+
+/*
+ * Some ring helper functions. We don't assume size is a power of 2 so
+ * we can't use circ_buf.h.
+ */
+static inline size_t spc_used(size_t head, size_t tail, size_t size)
+{
+	int diff = head - tail;
+
+	if (diff >= 0)
+		return diff;
+	else
+		return size + diff;
+}
+
+static inline size_t spc_free(size_t head, size_t tail, size_t size)
+{
+	/* Keep 1 byte unused or we can't tell full from empty */
+	return (size - spc_used(head, tail, size) - 1);
+}
+
+static inline size_t head_to_end(size_t head, size_t size)
+{
+	return size - head;
+}
+
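+/*
+ * Advance a ring index by @used bytes, wrapping modulo @size.  The
+ * release store orders the preceding entry/data writes before the
+ * updated index becomes visible to userspace.
+ */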
+#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
+
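+/*
+ * Copy (when @copy_data is set) each data sg into the data area at
+ * data_head, and emit a matching iovec whose base is an offset relative
+ * to mb_addr.  An sg that straddles the end of the data area is split
+ * across two iovecs.  Called with cmdr_lock held.
+ */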
+static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
+	struct scatterlist *data_sg, unsigned int data_nents,
+	struct iovec **iov, int *iov_cnt, bool copy_data)
+{
+	int i;
+	void *from, *to;
+	size_t copy_bytes;
+	struct scatterlist *sg;
+
+	for_each_sg(data_sg, sg, data_nents, i) {
+		copy_bytes = min_t(size_t, sg->length,
+				 head_to_end(udev->data_head, udev->data_size));
+		from = kmap_atomic(sg_page(sg)) + sg->offset;
+		to = (void *) udev->mb_addr + udev->data_off + udev->data_head;
+
+		if (copy_data) {
+			memcpy(to, from, copy_bytes);
+			tcmu_flush_dcache_range(to, copy_bytes);
+		}
+
+		/* Even iov_base is relative to mb_addr */
+		(*iov)->iov_len = copy_bytes;
+		(*iov)->iov_base = (void __user *) udev->data_off +
+						udev->data_head;
+		(*iov_cnt)++;
+		(*iov)++;
+
+		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
+
+		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
+		if (sg->length != copy_bytes) {
+			void *from_skip = from + copy_bytes;
+
+			copy_bytes = sg->length - copy_bytes;
+
+			(*iov)->iov_len = copy_bytes;
+			(*iov)->iov_base = (void __user *) udev->data_off +
+							udev->data_head;
+
+			if (copy_data) {
+				to = (void *) udev->mb_addr +
+					udev->data_off + udev->data_head;
+				memcpy(to, from_skip, copy_bytes);
+				tcmu_flush_dcache_range(to, copy_bytes);
+			}
+
+			(*iov_cnt)++;
+			(*iov)++;
+
+			UPDATE_HEAD(udev->data_head,
+				copy_bytes, udev->data_size);
+		}
+
+		kunmap_atomic(from - sg->offset);
+	}
+}
+
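+/*
+ * Copy completed Data-In back from the data area at data_tail into the
+ * command's sgl, advancing data_tail to reclaim the space.  A wrapped
+ * buffer is handled in two pieces, mirroring the scatter side.
+ */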
+static void gather_and_free_data_area(struct tcmu_dev *udev,
+	struct scatterlist *data_sg, unsigned int data_nents)
+{
+	int i;
+	void *from, *to;
+	size_t copy_bytes;
+	struct scatterlist *sg;
+
+	/* It'd be easier to look at entry's iovec again, but UAM */
+	for_each_sg(data_sg, sg, data_nents, i) {
+		copy_bytes = min_t(size_t, sg->length,
+				 head_to_end(udev->data_tail, udev->data_size));
+
+		to = kmap_atomic(sg_page(sg)) + sg->offset;
+		WARN_ON(sg->length + sg->offset > PAGE_SIZE);
+		from = (void *) udev->mb_addr +
+			udev->data_off + udev->data_tail;
+		tcmu_flush_dcache_range(from, copy_bytes);
+		memcpy(to, from, copy_bytes);
+
+		UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
+
+		/* Uh oh, wrapped the data buffer for this sg's data */
+		if (sg->length != copy_bytes) {
+			void *to_skip = to + copy_bytes;
+
+			from = (void *) udev->mb_addr +
+				udev->data_off + udev->data_tail;
+			WARN_ON(udev->data_tail);
+			copy_bytes = sg->length - copy_bytes;
+			tcmu_flush_dcache_range(from, copy_bytes);
+			memcpy(to_skip, from, copy_bytes);
+
+			UPDATE_HEAD(udev->data_tail,
+				copy_bytes, udev->data_size);
+		}
+		kunmap_atomic(to - sg->offset);
+	}
+}
+
+/*
+ * We can't queue a command until we have space available on the cmd ring *and*
+ * space available on the data ring.
+ *
+ * Called with ring lock held.
+ */
+static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
+{
+	struct tcmu_mailbox *mb = udev->mb_addr;
+	size_t space;
+	u32 cmd_head;
+	size_t cmd_needed;
+
+	tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+
+	/*
+	 * If cmd end-of-ring space is too small then we need space for a PAD plus
+	 * original cmd - cmds are internally contiguous.
+	 */
+	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
+		cmd_needed = cmd_size;
+	else
+		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
+
+	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
+	if (space < cmd_needed) {
+		pr_debug("no cmd space: %u %u %u\n", cmd_head,
+		       udev->cmdr_last_cleaned, udev->cmdr_size);
+		return false;
+	}
+
+	space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
+	if (space < data_needed) {
+		pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
+		       udev->data_tail, udev->data_size);
+		return false;
+	}
+
+	return true;
+}
+
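+/*
+ * Write one command entry into the ring: wait for command and data-area
+ * space, insert a PAD entry if the command would wrap the end of the
+ * ring, fill in the iovecs and CDB, then signal userspace via UIO and
+ * arm the command timeout.
+ */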
+static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+{
+	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+	size_t base_command_size, command_size;
+	struct tcmu_mailbox *mb;
+	struct tcmu_cmd_entry *entry;
+	struct iovec *iov;
+	int iov_cnt;
+	uint32_t cmd_head;
+	uint64_t cdb_off;
+	bool copy_to_data_area;
+
+	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
+		return -EINVAL;
+
+	/*
+	 * Must be a certain minimum size for response sense info, but
+	 * also may be larger if the iov array is large.
+	 *
+	 * iovs = sgl_nents+1, for end-of-ring case, plus another 1
+	 * b/c size == offsetof one-past-element.
+	 */
+	base_command_size = max(offsetof(struct tcmu_cmd_entry,
+					 req.iov[se_cmd->t_bidi_data_nents +
+						 se_cmd->t_data_nents + 2]),
+				sizeof(struct tcmu_cmd_entry));
+	command_size = base_command_size
+		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
+
+	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
+
+	spin_lock_irq(&udev->cmdr_lock);
+
+	mb = udev->mb_addr;
+	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+	if ((command_size > (udev->cmdr_size / 2))
+	    || tcmu_cmd->data_length > (udev->data_size - 1))
+		pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
+			"cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
+			udev->cmdr_size, udev->data_size);
+
+	while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
+		int ret;
+		DEFINE_WAIT(__wait);
+
+		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
+
+		pr_debug("sleeping for ring space\n");
+		spin_unlock_irq(&udev->cmdr_lock);
+		ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+		finish_wait(&udev->wait_cmdr, &__wait);
+		if (!ret) {
+			pr_warn("tcmu: command timed out\n");
+			return -ETIMEDOUT;
+		}
+
+		spin_lock_irq(&udev->cmdr_lock);
+
+		/* We dropped cmdr_lock, cmd_head is stale */
+		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+	}
+
+	/* Insert a PAD if end-of-ring space is too small */
+	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
+		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
+
+		entry = (void *) mb + CMDR_OFF + cmd_head;
+		tcmu_flush_dcache_range(entry, sizeof(*entry));
+		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
+		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
+		entry->hdr.cmd_id = 0; /* not used for PAD */
+		entry->hdr.kflags = 0;
+		entry->hdr.uflags = 0;
+
+		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
+
+		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+		WARN_ON(cmd_head != 0);
+	}
+
+	entry = (void *) mb + CMDR_OFF + cmd_head;
+	tcmu_flush_dcache_range(entry, sizeof(*entry));
+	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
+	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
+	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+	entry->hdr.kflags = 0;
+	entry->hdr.uflags = 0;
+
+	/*
+	 * Fix up iovecs, and handle if allocation in data ring wrapped.
+	 */
+	iov = &entry->req.iov[0];
+	iov_cnt = 0;
+	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
+		|| se_cmd->se_cmd_flags & SCF_BIDI);
+	alloc_and_scatter_data_area(udev, se_cmd->t_data_sg,
+		se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area);
+	entry->req.iov_cnt = iov_cnt;
+	entry->req.iov_dif_cnt = 0;
+
+	/* Handle BIDI commands */
+	iov_cnt = 0;
+	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
+	entry->req.iov_bidi_cnt = iov_cnt;
+
+	/* All offsets relative to mb_addr, not start of entry! */
+	cdb_off = CMDR_OFF + cmd_head + base_command_size;
+	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
+	entry->req.cdb_off = cdb_off;
+	tcmu_flush_dcache_range(entry, sizeof(*entry));
+
+	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
+	tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+	spin_unlock_irq(&udev->cmdr_lock);
+
+	/* TODO: only if FLUSH and FUA? */
+	uio_event_notify(&udev->uio_info);
+
+	mod_timer(&udev->timeout,
+		round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+
+	return 0;
+}
+
+static int tcmu_queue_cmd(struct se_cmd *se_cmd)
+{
+	struct se_device *se_dev = se_cmd->se_dev;
+	struct tcmu_dev *udev = TCMU_DEV(se_dev);
+	struct tcmu_cmd *tcmu_cmd;
+	int ret;
+
+	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
+	if (!tcmu_cmd)
+		return -ENOMEM;
+
+	ret = tcmu_queue_cmd_ring(tcmu_cmd);
+	if (ret < 0) {
+		pr_err("TCMU: Could not queue command\n");
+		spin_lock_irq(&udev->commands_lock);
+		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+		spin_unlock_irq(&udev->commands_lock);
+
+		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+	}
+
+	return ret;
+}
+
+static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
+{
+	struct se_cmd *se_cmd = cmd->se_cmd;
+	struct tcmu_dev *udev = cmd->tcmu_dev;
+
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+		/*
+		 * cmd has already been completed from the timeout path;
+		 * just reclaim its data ring space.
+		 */
+		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+		return;
+	}
+
+	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
+		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
+			cmd->se_cmd);
+		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
+	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
+			       se_cmd->scsi_sense_length);
+
+		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		/* Discard data_out buffer */
+		UPDATE_HEAD(udev->data_tail,
+			(size_t)se_cmd->t_data_sg->length, udev->data_size);
+
+		/* Get Data-In buffer */
+		gather_and_free_data_area(udev,
+			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+		gather_and_free_data_area(udev,
+			se_cmd->t_data_sg, se_cmd->t_data_nents);
+	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
+		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+	} else if (se_cmd->data_direction != DMA_NONE) {
+		pr_warn("TCMU: data direction was %d!\n",
+			se_cmd->data_direction);
+	}
+
+	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+	cmd->se_cmd = NULL;
+
+	kmem_cache_free(tcmu_cmd_cache, cmd);
+}
+
+static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
+{
+	struct tcmu_mailbox *mb;
+	unsigned long flags;
+	int handled = 0;
+
+	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
+		pr_err("ring broken, not handling completions\n");
+		return 0;
+	}
+
+	spin_lock_irqsave(&udev->cmdr_lock, flags);
+
+	mb = udev->mb_addr;
+	tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
+
+		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
+		struct tcmu_cmd *cmd;
+
+		tcmu_flush_dcache_range(entry, sizeof(*entry));
+
+		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
+			UPDATE_HEAD(udev->cmdr_last_cleaned,
+				    tcmu_hdr_get_len(entry->hdr.len_op),
+				    udev->cmdr_size);
+			continue;
+		}
+		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
+
+		spin_lock(&udev->commands_lock);
+		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
+		if (cmd)
+			idr_remove(&udev->commands, cmd->cmd_id);
+		spin_unlock(&udev->commands_lock);
+
+		if (!cmd) {
+			pr_err("cmd_id not found, ring is broken\n");
+			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+			break;
+		}
+
+		tcmu_handle_completion(cmd, entry);
+
+		UPDATE_HEAD(udev->cmdr_last_cleaned,
+			    tcmu_hdr_get_len(entry->hdr.len_op),
+			    udev->cmdr_size);
+
+		handled++;
+	}
+
+	if (mb->cmd_tail == mb->cmd_head)
+		del_timer(&udev->timeout); /* no more pending cmds */
+
+	spin_unlock_irqrestore(&udev->cmdr_lock, flags);
+
+	wake_up(&udev->wait_cmdr);
+
+	return handled;
+}
+
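+/*
+ * idr_for_each() callback: fail any command whose deadline has passed
+ * with CHECK_CONDITION. The ring entry itself is left in place; its
+ * data area space is reclaimed once userspace eventually completes it
+ * (see the TCMU_CMD_BIT_EXPIRED check in tcmu_handle_completion()).
+ */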
+static int tcmu_check_expired_cmd(int id, void *p, void *data)
+{
+	struct tcmu_cmd *cmd = p;
+
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+		return 0;
+
+	if (!time_after(jiffies, cmd->deadline))
+		return 0;
+
+	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
+	cmd->se_cmd = NULL;
+
+	return 0;
+}
+
+static void tcmu_device_timedout(unsigned long data)
+{
+	struct tcmu_dev *udev = (struct tcmu_dev *)data;
+	unsigned long flags;
+	int handled;
+
+	handled = tcmu_handle_completions(udev);
+
+	pr_warn("%d completions handled from timeout\n", handled);
+
+	spin_lock_irqsave(&udev->commands_lock, flags);
+	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+	spin_unlock_irqrestore(&udev->commands_lock, flags);
+
+	/*
+	 * We don't need to wakeup threads on wait_cmdr since they have their
+	 * own timeout.
+	 */
+}
+
+static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct tcmu_hba *tcmu_hba;
+
+	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
+	if (!tcmu_hba)
+		return -ENOMEM;
+
+	tcmu_hba->host_id = host_id;
+	hba->hba_ptr = tcmu_hba;
+
+	return 0;
+}
+
+static void tcmu_detach_hba(struct se_hba *hba)
+{
+	kfree(hba->hba_ptr);
+	hba->hba_ptr = NULL;
+}
+
+static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
+{
+	struct tcmu_dev *udev;
+
+	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
+	if (!udev)
+		return NULL;
+
+	udev->name = kstrdup(name, GFP_KERNEL);
+	if (!udev->name) {
+		kfree(udev);
+		return NULL;
+	}
+
+	udev->hba = hba;
+
+	init_waitqueue_head(&udev->wait_cmdr);
+	spin_lock_init(&udev->cmdr_lock);
+
+	idr_init(&udev->commands);
+	spin_lock_init(&udev->commands_lock);
+
+	setup_timer(&udev->timeout, tcmu_device_timedout,
+		(unsigned long)udev);
+
+	return &udev->se_dev;
+}
+
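+/*
+ * Called when userspace writes to the uio device node; treat the write
+ * as a doorbell and reap any completions posted to the ring.
+ */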
+static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
+{
+	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);
+
+	tcmu_handle_completions(tcmu_dev);
+
+	return 0;
+}
+
+/*
+ * mmap code from uio.c. Copied here because we want to hook mmap()
+ * and this stuff must come along.
+ */
+static int tcmu_find_mem_index(struct vm_area_struct *vma)
+{
+	struct tcmu_dev *udev = vma->vm_private_data;
+	struct uio_info *info = &udev->uio_info;
+
+	if (vma->vm_pgoff < MAX_UIO_MAPS) {
+		if (info->mem[vma->vm_pgoff].size == 0)
+			return -1;
+		return (int)vma->vm_pgoff;
+	}
+	return -1;
+}
+
+static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct tcmu_dev *udev = vma->vm_private_data;
+	struct uio_info *info = &udev->uio_info;
+	struct page *page;
+	unsigned long offset;
+	void *addr;
+
+	int mi = tcmu_find_mem_index(vma);
+	if (mi < 0)
+		return VM_FAULT_SIGBUS;
+
+	/*
+	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
+	 * to use mem[N].
+	 */
+	offset = (vmf->pgoff - mi) << PAGE_SHIFT;
+
+	addr = (void *)(unsigned long)info->mem[mi].addr + offset;
+	if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
+		page = virt_to_page(addr);
+	else
+		page = vmalloc_to_page(addr);
+	get_page(page);
+	vmf->page = page;
+	return 0;
+}
+
+static const struct vm_operations_struct tcmu_vm_ops = {
+	.fault = tcmu_vma_fault,
+};
+
+static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = &tcmu_vm_ops;
+
+	vma->vm_private_data = udev;
+
+	/* Ensure the mmap is exactly the right size */
+	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int tcmu_open(struct uio_info *info, struct inode *inode)
+{
+	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+
+	/* O_EXCL not supported for char devs, so fake it? */
+	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
+		return -EBUSY;
+
+	pr_debug("open\n");
+
+	return 0;
+}
+
+static int tcmu_release(struct uio_info *info, struct inode *inode)
+{
+	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+
+	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
+
+	pr_debug("close\n");
+
+	return 0;
+}
+
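+/*
+ * Multicast a device added/removed event to the TCMU config netlink
+ * group so userspace handlers can set up or tear down the backstore.
+ */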
+static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
+{
+	struct sk_buff *skb;
+	void *msg_header;
+	int ret = -ENOMEM;
+
+	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb)
+		return ret;
+
+	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
+	if (!msg_header)
+		goto free_skb;
+
+	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
+	if (ret < 0)
+		goto free_skb;
+
+	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
+	if (ret < 0)
+		goto free_skb;
+
+	genlmsg_end(skb, msg_header);
+
+	ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
+				TCMU_MCGRP_CONFIG, GFP_KERNEL);
+
+	/* We don't care if no one is listening */
+	if (ret == -ESRCH)
+		ret = 0;
+
+	return ret;
+free_skb:
+	nlmsg_free(skb);
+	return ret;
+}
+
+static int tcmu_configure_device(struct se_device *dev)
+{
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+	struct tcmu_hba *hba = udev->hba->hba_ptr;
+	struct uio_info *info;
+	struct tcmu_mailbox *mb;
+	size_t size;
+	size_t used;
+	int ret = 0;
+	char *str;
+
+	info = &udev->uio_info;
+
+	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
+			udev->dev_config);
+	size += 1; /* for \0 */
+	str = kmalloc(size, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
+
+	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
+
+	if (udev->dev_config[0])
+		snprintf(str + used, size - used, "/%s", udev->dev_config);
+
+	info->name = str;
+
+	udev->mb_addr = vzalloc(TCMU_RING_SIZE);
+	if (!udev->mb_addr) {
+		ret = -ENOMEM;
+		goto err_vzalloc;
+	}
+
+	/*
+	 * The mailbox fits in the first part of the CMDR space: the command
+	 * ring starts at CMDR_OFF, and the data area occupies the rest of
+	 * the TCMU_RING_SIZE region from CMDR_SIZE onward.
+	 */
+	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
+	udev->data_off = CMDR_SIZE;
+	udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;
+
+	mb = udev->mb_addr;
+	mb->version = TCMU_MAILBOX_VERSION;
+	mb->cmdr_off = CMDR_OFF;
+	mb->cmdr_size = udev->cmdr_size;
+
+	WARN_ON(!PAGE_ALIGNED(udev->data_off));
+	WARN_ON(udev->data_size % PAGE_SIZE);
+
+	info->version = __stringify(TCMU_MAILBOX_VERSION);
+
+	info->mem[0].name = "tcm-user command & data buffer";
+	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
+	info->mem[0].size = TCMU_RING_SIZE;
+	info->mem[0].memtype = UIO_MEM_VIRTUAL;
+
+	info->irqcontrol = tcmu_irqcontrol;
+	info->irq = UIO_IRQ_CUSTOM;
+
+	info->mmap = tcmu_mmap;
+	info->open = tcmu_open;
+	info->release = tcmu_release;
+
+	ret = uio_register_device(tcmu_root_device, info);
+	if (ret)
+		goto err_register;
+
+	/* Other attributes can be configured in userspace */
+	dev->dev_attrib.hw_block_size = 512;
+	dev->dev_attrib.hw_max_sectors = 128;
+	dev->dev_attrib.hw_queue_depth = 128;
+
+	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
+				 udev->uio_info.uio_dev->minor);
+	if (ret)
+		goto err_netlink;
+
+	return 0;
+
+err_netlink:
+	uio_unregister_device(&udev->uio_info);
+err_register:
+	vfree(udev->mb_addr);
+err_vzalloc:
+	kfree(info->name);
+
+	return ret;
+}
+
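+/*
+ * idr_for_each() callback: any command that has not already expired is
+ * still pending, so return -EINVAL to abort the walk; tcmu_free_device()
+ * turns a non-zero result into a WARN_ON.
+ */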
+static int tcmu_check_pending_cmd(int id, void *p, void *data)
+{
+	struct tcmu_cmd *cmd = p;
+
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+		return 0;
+	return -EINVAL;
+}
+
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+
+	kfree(udev);
+}
+
+static void tcmu_free_device(struct se_device *dev)
+{
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+	int i;
+
+	del_timer_sync(&udev->timeout);
+
+	vfree(udev->mb_addr);
+
+	/* Upper layer should drain all requests before calling this */
+	spin_lock_irq(&udev->commands_lock);
+	i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
+	idr_destroy(&udev->commands);
+	spin_unlock_irq(&udev->commands_lock);
+	WARN_ON(i);
+
+	/* Device was configured */
+	if (udev->uio_info.uio_dev) {
+		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
+				   udev->uio_info.uio_dev->minor);
+
+		uio_unregister_device(&udev->uio_info);
+		kfree(udev->uio_info.name);
+		kfree(udev->name);
+	}
+	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
+enum {
+	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
+};
+
+static match_table_t tokens = {
+	{Opt_dev_config, "dev_config=%s"},
+	{Opt_dev_size, "dev_size=%u"},
+	{Opt_hw_block_size, "hw_block_size=%u"},
+	{Opt_err, NULL}
+};
+
+static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
+{
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+	char *orig, *ptr, *opts, *arg_p;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, token;
+	unsigned long tmp_ul;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_dev_config:
+			if (match_strlcpy(udev->dev_config, &args[0],
+					  TCMU_CONFIG_LEN) == 0) {
+				ret = -EINVAL;
+				break;
+			}
+			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
+			break;
+		case Opt_dev_size:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
+			kfree(arg_p);
+			if (ret < 0)
+				pr_err("kstrtoul() failed for dev_size=\n");
+			break;
+		case Opt_hw_block_size:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			ret = kstrtoul(arg_p, 0, &tmp_ul);
+			kfree(arg_p);
+			if (ret < 0) {
+				pr_err("kstrtoul() failed for hw_block_size=\n");
+				break;
+			}
+			if (!tmp_ul) {
+				pr_err("hw_block_size must be nonzero\n");
+				ret = -EINVAL;
+				break;
+			}
+			dev->dev_attrib.hw_block_size = tmp_ul;
+			break;
+		default:
+			break;
+		}
+	}
+
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
+{
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+	ssize_t bl = 0;
+
+	bl = sprintf(b + bl, "Config: %s ",
+		     udev->dev_config[0] ? udev->dev_config : "NULL");
+	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
+
+	return bl;
+}
+
+static sector_t tcmu_get_blocks(struct se_device *dev)
+{
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+
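+	/*
+	 * get_blocks() reports the last addressable LBA, i.e. the total
+	 * number of blocks minus one, hence the subtraction of a single
+	 * block size before the divide.
+	 */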
+	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
+		       dev->dev_attrib.block_size);
+}
+
+static sense_reason_t
+tcmu_pass_op(struct se_cmd *se_cmd)
+{
+	int ret = tcmu_queue_cmd(se_cmd);
+
+	if (ret != 0)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	else
+		return TCM_NO_SENSE;
+}
+
+static sense_reason_t
+tcmu_parse_cdb(struct se_cmd *cmd)
+{
+	return passthrough_parse_cdb(cmd, tcmu_pass_op);
+}
+
+static const struct target_backend_ops tcmu_ops = {
+	.name			= "user",
+	.owner			= THIS_MODULE,
+	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
+	.attach_hba		= tcmu_attach_hba,
+	.detach_hba		= tcmu_detach_hba,
+	.alloc_device		= tcmu_alloc_device,
+	.configure_device	= tcmu_configure_device,
+	.free_device		= tcmu_free_device,
+	.parse_cdb		= tcmu_parse_cdb,
+	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
+	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
+	.get_device_type	= sbc_get_device_type,
+	.get_blocks		= tcmu_get_blocks,
+	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
+};
+
+static int __init tcmu_module_init(void)
+{
+	int ret;
+
+	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
+
+	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
+				sizeof(struct tcmu_cmd),
+				__alignof__(struct tcmu_cmd),
+				0, NULL);
+	if (!tcmu_cmd_cache)
+		return -ENOMEM;
+
+	tcmu_root_device = root_device_register("tcm_user");
+	if (IS_ERR(tcmu_root_device)) {
+		ret = PTR_ERR(tcmu_root_device);
+		goto out_free_cache;
+	}
+
+	ret = genl_register_family(&tcmu_genl_family);
+	if (ret < 0)
+		goto out_unreg_device;
+
+	ret = transport_backend_register(&tcmu_ops);
+	if (ret)
+		goto out_unreg_genl;
+
+	return 0;
+
+out_unreg_genl:
+	genl_unregister_family(&tcmu_genl_family);
+out_unreg_device:
+	root_device_unregister(tcmu_root_device);
+out_free_cache:
+	kmem_cache_destroy(tcmu_cmd_cache);
+
+	return ret;
+}
+
+static void __exit tcmu_module_exit(void)
+{
+	target_backend_unregister(&tcmu_ops);
+	genl_unregister_family(&tcmu_genl_family);
+	root_device_unregister(tcmu_root_device);
+	kmem_cache_destroy(tcmu_cmd_cache);
+}
+
+MODULE_DESCRIPTION("TCM USER subsystem plugin");
+MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
+MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(tcmu_module_init);
+module_exit(tcmu_module_exit);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
new file mode 100644
index 0000000..6415e9b
--- /dev/null
+++ b/drivers/target/target_core_xcopy.c
@@ -0,0 +1,1054 @@
+/*******************************************************************************
+ * Filename: target_core_xcopy.c
+ *
+ * This file contains support for SPC-4 Extended-Copy offload with generic
+ * TCM backends.
+ *
+ * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
+ *
+ * Author:
+ * Nicholas A. Bellinger <nab@daterainc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/configfs.h>
+#include <scsi/scsi_proto.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+#include "target_core_xcopy.h"
+
+static struct workqueue_struct *xcopy_wq;
+
+static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
+{
+	int off = 0;
+
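+	/*
+	 * Build an NAA IEEE Registered Extended (type 6h) designator using
+	 * the OpenFabrics IEEE Company ID 0x001405, followed by the vendor
+	 * specific suffix generated from the unit serial below.
+	 */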
+	buf[off++] = (0x6 << 4);
+	buf[off++] = 0x01;
+	buf[off++] = 0x40;
+	buf[off] = (0x5 << 4);
+
+	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+	return 0;
+}
+
+static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+					bool src)
+{
+	struct se_device *se_dev;
+	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+	int rc;
+
+	if (src)
+		dev_wwn = &xop->dst_tid_wwn[0];
+	else
+		dev_wwn = &xop->src_tid_wwn[0];
+
+	mutex_lock(&g_device_mutex);
+	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+
+		if (!se_dev->dev_attrib.emulate_3pc)
+			continue;
+
+		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+
+		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+		if (rc != 0)
+			continue;
+
+		if (src) {
+			xop->dst_dev = se_dev;
+			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
+				" se_dev\n", xop->dst_dev);
+		} else {
+			xop->src_dev = se_dev;
+			pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
+				" se_dev\n", xop->src_dev);
+		}
+
+		rc = target_depend_item(&se_dev->dev_group.cg_item);
+		if (rc != 0) {
+			pr_err("configfs_depend_item attempt failed:"
+				" %d for se_dev: %p\n", rc, se_dev);
+			mutex_unlock(&g_device_mutex);
+			return rc;
+		}
+
+		pr_debug("Called configfs_depend_item for se_dev: %p"
+			" se_dev->se_dev_group: %p\n", se_dev,
+			&se_dev->dev_group);
+
+		mutex_unlock(&g_device_mutex);
+		return 0;
+	}
+	mutex_unlock(&g_device_mutex);
+
+	pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+	return -EINVAL;
+}
+
+static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+				unsigned char *p, bool src)
+{
+	unsigned char *desc = p;
+	unsigned short ript;
+	u8 desig_len;
+	/*
+	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
+	 */
+	ript = get_unaligned_be16(&desc[2]);
+	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
+	/*
+	 * Check for supported code set, association, and designator type
+	 */
+	if ((desc[4] & 0x0f) != 0x1) {
+		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
+		return -EINVAL;
+	}
+	if ((desc[5] & 0x30) != 0x00) {
+		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
+		return -EINVAL;
+	}
+	if ((desc[5] & 0x0f) != 0x3) {
+		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
+				(desc[5] & 0x0f));
+		return -EINVAL;
+	}
+	/*
+	 * Check for the 16 byte length matching an NAA IEEE Registered
+	 * Extended designator
+	 */
+	desig_len = desc[7];
+	if (desig_len != 16) {
+		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
+		return -EINVAL;
+	}
+	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
+	/*
+	 * Check for the NAA IEEE Registered Extended designator header..
+	 */
+	if ((desc[8] & 0xf0) != 0x60) {
+		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
+					(desc[8] & 0xf0));
+		return -EINVAL;
+	}
+
+	if (src) {
+		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+		/*
+		 * Determine if the source designator matches the local device
+		 */
+		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
+				XCOPY_NAA_IEEE_REGEX_LEN)) {
+			xop->op_origin = XCOL_SOURCE_RECV_OP;
+			xop->src_dev = se_cmd->se_dev;
+			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
+					" received xop\n", xop->src_dev);
+		}
+	} else {
+		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+		/*
+		 * Determine if the destination designator matches the local device
+		 */
+		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
+				XCOPY_NAA_IEEE_REGEX_LEN)) {
+			xop->op_origin = XCOL_DEST_RECV_OP;
+			xop->dst_dev = se_cmd->se_dev;
+			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
+				" received xop\n", xop->dst_dev);
+		}
+	}
+
+	return 0;
+}
+
+static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+				struct xcopy_op *xop, unsigned char *p,
+				unsigned short tdll, sense_reason_t *sense_ret)
+{
+	struct se_device *local_dev = se_cmd->se_dev;
+	unsigned char *desc = p;
+	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+	unsigned short start = 0;
+	bool src = true;
+
+	*sense_ret = TCM_INVALID_PARAMETER_LIST;
+
+	if (offset != 0) {
+		pr_err("XCOPY target descriptor list length is not"
+			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+		return -EINVAL;
+	}
+	if (tdll > 64) {
+		pr_err("XCOPY target descriptor supports a maximum"
+			" two src/dest descriptors, tdll: %hu too large..\n", tdll);
+		return -EINVAL;
+	}
+	/*
+	 * Generate an IEEE Registered Extended designator based upon the
+	 * se_device the XCOPY was received upon..
+	 */
+	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
+
+	while (start < tdll) {
+		/*
+		 * Check for target descriptor identification type 0xE4,
+		 * using VPD 0x83 WWN matching ..
+		 */
+		switch (desc[0]) {
+		case 0xe4:
+			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
+							&desc[0], src);
+			if (rc != 0)
+				goto out;
+			/*
+			 * Assume target descriptors are in source -> destination order..
+			 */
+			src = !src;
+			start += XCOPY_TARGET_DESC_LEN;
+			desc += XCOPY_TARGET_DESC_LEN;
+			ret++;
+			break;
+		default:
+			pr_err("XCOPY unsupported descriptor type code:"
+					" 0x%02x\n", desc[0]);
+			goto out;
+		}
+	}
+
+	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
+	else
+		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+	/*
+	 * If a matching IEEE NAA 0x83 descriptor for the requested device
+	 * is not located on this node, return COPY_ABORTED with ASC/ASCQ
+	 * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
+	 * initiator to fall back to normal copy method.
+	 */
+	if (rc < 0) {
+		*sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
+		goto out;
+	}
+
+	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
+		 xop->src_dev, &xop->src_tid_wwn[0]);
+	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
+		 xop->dst_dev, &xop->dst_tid_wwn[0]);
+
+	return ret;
+
+out:
+	return -EINVAL;
+}
+
+static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
+					unsigned char *p)
+{
+	unsigned char *desc = p;
+	int dc = (desc[1] & 0x02);
+	unsigned short desc_len;
+
+	desc_len = get_unaligned_be16(&desc[2]);
+	if (desc_len != 0x18) {
+		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
+				" %hu\n", desc_len);
+		return -EINVAL;
+	}
+
+	xop->stdi = get_unaligned_be16(&desc[4]);
+	xop->dtdi = get_unaligned_be16(&desc[6]);
+	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
+		desc_len, xop->stdi, xop->dtdi, dc);
+
+	xop->nolb = get_unaligned_be16(&desc[10]);
+	xop->src_lba = get_unaligned_be64(&desc[12]);
+	xop->dst_lba = get_unaligned_be64(&desc[20]);
+	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
+		xop->nolb, (unsigned long long)xop->src_lba,
+		(unsigned long long)xop->dst_lba);
+
+	if (dc != 0) {
+		xop->dbl = (desc[29] & 0xff) << 16;
+		xop->dbl |= (desc[30] & 0xff) << 8;
+		xop->dbl |= desc[31] & 0xff;
+
+		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
+	}
+	return 0;
+}
+
+static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
+				struct xcopy_op *xop, unsigned char *p,
+				unsigned int sdll)
+{
+	unsigned char *desc = p;
+	unsigned int start = 0;
+	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+
+	if (offset != 0) {
+		pr_err("XCOPY segment descriptor list length is not"
+			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+		return -EINVAL;
+	}
+
+	while (start < sdll) {
+		/*
+		 * Check segment descriptor type code for block -> block
+		 */
+		switch (desc[0]) {
+		case 0x02:
+			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
+			if (rc < 0)
+				goto out;
+
+			ret++;
+			start += XCOPY_SEGMENT_DESC_LEN;
+			desc += XCOPY_SEGMENT_DESC_LEN;
+			break;
+		default:
+			pr_err("XCOPY unsupported segment descriptor"
+				"type: 0x%02x\n", desc[0]);
+			goto out;
+		}
+	}
+
+	return ret;
+
+out:
+	return -EINVAL;
+}
+
+/*
+ * Start xcopy_pt_ops
+ */
+
+struct xcopy_pt_cmd {
+	bool remote_port;
+	struct se_cmd se_cmd;
+	struct xcopy_op *xcopy_op;
+	struct completion xpt_passthrough_sem;
+	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+};
+
+struct se_portal_group xcopy_pt_tpg;
+static struct se_session xcopy_pt_sess;
+static struct se_node_acl xcopy_pt_nacl;
+
+static char *xcopy_pt_get_fabric_name(void)
+{
+	return "xcopy-pt";
+}
+
+static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
+{
+	struct se_device *remote_dev;
+
+	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+		remote_dev = xop->dst_dev;
+	else
+		remote_dev = xop->src_dev;
+
+	pr_debug("Calling configfs_undepend_item for"
+		  " remote_dev: %p remote_dev->dev_group: %p\n",
+		  remote_dev, &remote_dev->dev_group.cg_item);
+
+	target_undepend_item(&remote_dev->dev_group.cg_item);
+}
+
+static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
+{
+	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+				struct xcopy_pt_cmd, se_cmd);
+
+	kfree(xpt_cmd);
+}
+
+static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
+{
+	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+				struct xcopy_pt_cmd, se_cmd);
+
+	complete(&xpt_cmd->xpt_passthrough_sem);
+	return 0;
+}
+
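+/*
+ * The remaining fabric callbacks are intentional no-ops: the internal
+ * xcopy-pt session never returns data or status to a real initiator,
+ * and command completion is signalled via xcopy_pt_check_stop_free()
+ * above.
+ */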
+static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static const struct target_core_fabric_ops xcopy_pt_tfo = {
+	.get_fabric_name	= xcopy_pt_get_fabric_name,
+	.get_cmd_state		= xcopy_pt_get_cmd_state,
+	.release_cmd		= xcopy_pt_release_cmd,
+	.check_stop_free	= xcopy_pt_check_stop_free,
+	.write_pending		= xcopy_pt_write_pending,
+	.write_pending_status	= xcopy_pt_write_pending_status,
+	.queue_data_in		= xcopy_pt_queue_data_in,
+	.queue_status		= xcopy_pt_queue_status,
+};
+
+/*
+ * End xcopy_pt_ops
+ */
+
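+/*
+ * Set up the statically allocated portal group, node ACL and session
+ * that back internal xcopy-pt passthrough commands, plus the workqueue
+ * that performs the actual copy I/O.
+ */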
+int target_xcopy_setup_pt(void)
+{
+	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+	if (!xcopy_wq) {
+		pr_err("Unable to allocate xcopy_wq\n");
+		return -ENOMEM;
+	}
+
+	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
+	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
+	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
+	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
+
+	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
+
+	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
+	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
+	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
+	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
+	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
+	INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
+	spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
+
+	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+
+	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
+	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
+
+	return 0;
+}
+
+void target_xcopy_release_pt(void)
+{
+	if (xcopy_wq)
+		destroy_workqueue(xcopy_wq);
+}
+
+static void target_xcopy_setup_pt_port(
+	struct xcopy_pt_cmd *xpt_cmd,
+	struct xcopy_op *xop,
+	bool remote_port)
+{
+	struct se_cmd *ec_cmd = xop->xop_se_cmd;
+	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;
+
+	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
+		/*
+		 * Honor destination port reservations for X-COPY PUSH
+		 * emulation, where the CDB is received on the local source
+		 * port and blocks are READ locally, then WRITTEN to the
+		 * remote destination port.
+		 */
+		if (remote_port) {
+			xpt_cmd->remote_port = remote_port;
+		} else {
+			pt_cmd->se_lun = ec_cmd->se_lun;
+			pt_cmd->se_dev = ec_cmd->se_dev;
+
+			pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
+				" %p\n", pt_cmd->se_dev);
+			pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
+				pt_cmd->se_lun);
+		}
+	} else {
+		/*
+		 * Honor source port reservations for X-COPY PULL emulation,
+		 * where the CDB is received on the local destination port and
+		 * blocks are READ from the remote source port, then WRITTEN
+		 * to the local destination port.
+		 */
+		if (remote_port) {
+			xpt_cmd->remote_port = remote_port;
+		} else {
+			pt_cmd->se_lun = ec_cmd->se_lun;
+			pt_cmd->se_dev = ec_cmd->se_dev;
+
+			pr_debug("Honoring local DST port from ec_cmd->se_dev:"
+				" %p\n", pt_cmd->se_dev);
+			pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
+				pt_cmd->se_lun);
+		}
+	}
+}
+
+static void target_xcopy_init_pt_lun(struct se_device *se_dev,
+		struct se_cmd *pt_cmd, bool remote_port)
+{
+	/*
+	 * Don't allocate + init a pt_cmd->se_lun if honoring local port for
+	 * reservations.  The pt_cmd->se_lun pointer will be setup from within
+	 * target_xcopy_setup_pt_port()
+	 */
+	if (remote_port) {
+		pr_debug("Setup emulated se_dev: %p from se_dev\n",
+			pt_cmd->se_dev);
+		pt_cmd->se_lun = &se_dev->xcopy_lun;
+		pt_cmd->se_dev = se_dev;
+	}
+
+	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+}
+
+static int target_xcopy_setup_pt_cmd(
+	struct xcopy_pt_cmd *xpt_cmd,
+	struct xcopy_op *xop,
+	struct se_device *se_dev,
+	unsigned char *cdb,
+	bool remote_port,
+	bool alloc_mem)
+{
+	struct se_cmd *cmd = &xpt_cmd->se_cmd;
+	sense_reason_t sense_rc;
+	int ret = 0, rc;
+	/*
+	 * Setup LUN+port to honor reservations based upon xop->op_origin for
+	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
+	 */
+	target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
+
+	xpt_cmd->xcopy_op = xop;
+	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
+
+	cmd->tag = 0;
+	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
+	if (sense_rc) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (alloc_mem) {
+		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
+				      cmd->data_length, false);
+		if (rc < 0) {
+			ret = rc;
+			goto out;
+		}
+		/*
+		 * Set this bit so that transport_free_pages() allows the
+		 * caller to release SGLs + physical memory allocated by
+		 * transport_generic_get_mem()..
+		 */
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+	} else {
+		/*
+		 * Here the previously allocated SGLs for the internal READ
+		 * are mapped zero-copy to the internal WRITE.
+		 */
+		sense_rc = transport_generic_map_mem_to_cmd(cmd,
+					xop->xop_data_sg, xop->xop_data_nents,
+					NULL, 0);
+		if (sense_rc) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
+			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
+	}
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
+{
+	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
+	sense_reason_t sense_rc;
+
+	sense_rc = transport_generic_new_cmd(se_cmd);
+	if (sense_rc)
+		return -EINVAL;
+
+	if (se_cmd->data_direction == DMA_TO_DEVICE)
+		target_execute_cmd(se_cmd);
+
+	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
+
+	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
+			se_cmd->scsi_status);
+
+	return (se_cmd->scsi_status) ? -EINVAL : 0;
+}
+
+static int target_xcopy_read_source(
+	struct se_cmd *ec_cmd,
+	struct xcopy_op *xop,
+	struct se_device *src_dev,
+	sector_t src_lba,
+	u32 src_sectors)
+{
+	struct xcopy_pt_cmd *xpt_cmd;
+	struct se_cmd *se_cmd;
+	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
+	int rc;
+	unsigned char cdb[16];
+	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
+
+	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
+	if (!xpt_cmd) {
+		pr_err("Unable to allocate xcopy_pt_cmd\n");
+		return -ENOMEM;
+	}
+	init_completion(&xpt_cmd->xpt_passthrough_sem);
+	se_cmd = &xpt_cmd->se_cmd;
+
+	memset(&cdb[0], 0, 16);
+	cdb[0] = READ_16;
+	put_unaligned_be64(src_lba, &cdb[2]);
+	put_unaligned_be32(src_sectors, &cdb[10]);
+	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
+		(unsigned long long)src_lba, src_sectors, length);
+
+	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
+	xop->src_pt_cmd = xpt_cmd;
+
+	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
+				remote_port, true);
+	if (rc < 0) {
+		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+		transport_generic_free_cmd(se_cmd, 0);
+		return rc;
+	}
+
+	xop->xop_data_sg = se_cmd->t_data_sg;
+	xop->xop_data_nents = se_cmd->t_data_nents;
+	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
+		" memory\n", xop->xop_data_sg, xop->xop_data_nents);
+
+	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+	if (rc < 0) {
+		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+		transport_generic_free_cmd(se_cmd, 0);
+		return rc;
+	}
+	/*
+	 * Clear off the allocated t_data_sg, that has been saved for
+	 * zero-copy WRITE submission reuse in struct xcopy_op..
+	 */
+	se_cmd->t_data_sg = NULL;
+	se_cmd->t_data_nents = 0;
+
+	return 0;
+}
+
+static int target_xcopy_write_destination(
+	struct se_cmd *ec_cmd,
+	struct xcopy_op *xop,
+	struct se_device *dst_dev,
+	sector_t dst_lba,
+	u32 dst_sectors)
+{
+	struct xcopy_pt_cmd *xpt_cmd;
+	struct se_cmd *se_cmd;
+	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
+	int rc;
+	unsigned char cdb[16];
+	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
+
+	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
+	if (!xpt_cmd) {
+		pr_err("Unable to allocate xcopy_pt_cmd\n");
+		return -ENOMEM;
+	}
+	init_completion(&xpt_cmd->xpt_passthrough_sem);
+	se_cmd = &xpt_cmd->se_cmd;
+
+	memset(&cdb[0], 0, 16);
+	cdb[0] = WRITE_16;
+	put_unaligned_be64(dst_lba, &cdb[2]);
+	put_unaligned_be32(dst_sectors, &cdb[10]);
+	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
+		(unsigned long long)dst_lba, dst_sectors, length);
+
+	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
+	xop->dst_pt_cmd = xpt_cmd;
+
+	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
+				remote_port, false);
+	if (rc < 0) {
+		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+		/*
+		 * If the failure happened before the t_mem_list hand-off in
+		 * target_xcopy_setup_pt_cmd(), reset memory + clear flag so that
+		 * core releases this memory on error during X-COPY WRITE I/O.
+		 */
+		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+		src_cmd->t_data_sg = xop->xop_data_sg;
+		src_cmd->t_data_nents = xop->xop_data_nents;
+
+		transport_generic_free_cmd(se_cmd, 0);
+		return rc;
+	}
+
+	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+	if (rc < 0) {
+		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+		transport_generic_free_cmd(se_cmd, 0);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void target_xcopy_do_work(struct work_struct *work)
+{
+	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
+	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
+	struct se_cmd *ec_cmd = xop->xop_se_cmd;
+	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
+	unsigned int max_sectors;
+	int rc;
+	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;
+
+	end_lba = src_lba + nolb;
+	/*
+	 * Break up XCOPY I/O into hw_max_sectors-sized chunks, based on
+	 * the smaller hw_max_sectors of src_dev + dst_dev, capped at
+	 * XCOPY_MAX_SECTORS.
+	 */
+	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
+			  dst_dev->dev_attrib.hw_max_sectors);
+	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
+
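+	/*
+	 * NOLB in the 0x02 segment descriptor is a 16-bit field, so clamp
+	 * each copy chunk to what fits in a u16.
+	 */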
+	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
+
+	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
+			nolb, max_nolb, (unsigned long long)end_lba);
+	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
+			(unsigned long long)src_lba, (unsigned long long)dst_lba);
+
+	while (src_lba < end_lba) {
+		cur_nolb = min(nolb, max_nolb);
+
+		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
+			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
+
+		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
+		if (rc < 0)
+			goto out;
+
+		src_lba += cur_nolb;
+		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
+				(unsigned long long)src_lba);
+
+		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
+			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
+
+		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
+						dst_lba, cur_nolb);
+		if (rc < 0) {
+			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+			goto out;
+		}
+
+		dst_lba += cur_nolb;
+		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
+				(unsigned long long)dst_lba);
+
+		copied_nolb += cur_nolb;
+		nolb -= cur_nolb;
+
+		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
+	}
+
+	xcopy_pt_undepend_remotedev(xop);
+	kfree(xop);
+
+	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
+		(unsigned long long)src_lba, (unsigned long long)dst_lba);
+	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
+		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
+
+	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
+	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
+	return;
+
+out:
+	xcopy_pt_undepend_remotedev(xop);
+	kfree(xop);
+	/*
+	 * Don't override an error scsi status if it has already been set
+	 */
+	if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
+		pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
+			" CHECK_CONDITION -> sending response\n", rc);
+		ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+	}
+	target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
+}
+
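+/*
+ * Entry point for EXTENDED_COPY(LID1): validate the parameter list
+ * header, resolve source/destination devices from the 0xe4 target
+ * descriptors, parse the 0x02 segment descriptors, and hand the copy
+ * off to the xcopy workqueue.
+ */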
+sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+{
+	struct se_device *dev = se_cmd->se_dev;
+	struct xcopy_op *xop = NULL;
+	unsigned char *p = NULL, *seg_desc;
+	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
+	int rc;
+	unsigned short tdll;
+
+	if (!dev->dev_attrib.emulate_3pc) {
+		pr_err("EXTENDED_COPY operation explicitly disabled\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	sa = se_cmd->t_task_cdb[1] & 0x1f;
+	if (sa != 0x00) {
+		pr_err("EXTENDED_COPY(LID4) not supported\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+	if (!xop) {
+		pr_err("Unable to allocate xcopy_op\n");
+		return TCM_OUT_OF_RESOURCES;
+	}
+	xop->xop_se_cmd = se_cmd;
+
+	p = transport_kmap_data_sg(se_cmd);
+	if (!p) {
+		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+		kfree(xop);
+		return TCM_OUT_OF_RESOURCES;
+	}
+
+	list_id = p[0];
+	list_id_usage = (p[1] & 0x18) >> 3;
+
+	/*
+	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
+	 */
+	tdll = get_unaligned_be16(&p[2]);
+	sdll = get_unaligned_be32(&p[8]);
+
+	inline_dl = get_unaligned_be32(&p[12]);
+	if (inline_dl != 0) {
+		pr_err("XCOPY with non zero inline data length\n");
+		goto out;
+	}
+
+	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
+		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
+		tdll, sdll, inline_dl);
+
+	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
+	if (rc <= 0)
+		goto out;
+
+	if (xop->src_dev->dev_attrib.block_size !=
+	    xop->dst_dev->dev_attrib.block_size) {
+		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
+		       " block_size: %u currently unsupported\n",
+			xop->src_dev->dev_attrib.block_size,
+			xop->dst_dev->dev_attrib.block_size);
+		xcopy_pt_undepend_remotedev(xop);
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto out;
+	}
+
+	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
+				rc * XCOPY_TARGET_DESC_LEN);
+	seg_desc = &p[16];
+	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
+
+	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
+	if (rc <= 0) {
+		xcopy_pt_undepend_remotedev(xop);
+		goto out;
+	}
+	transport_kunmap_data_sg(se_cmd);
+
+	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+				rc * XCOPY_SEGMENT_DESC_LEN);
+	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
+	queue_work(xcopy_wq, &xop->xop_work);
+	return TCM_NO_SENSE;
+
+out:
+	if (p)
+		transport_kunmap_data_sg(se_cmd);
+	kfree(xop);
+	return ret;
+}
+
+static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
+{
+	unsigned char *p;
+
+	p = transport_kmap_data_sg(se_cmd);
+	if (!p) {
+		pr_err("transport_kmap_data_sg failed in"
+		       " target_rcr_operating_parameters\n");
+		return TCM_OUT_OF_RESOURCES;
+	}
+
+	if (se_cmd->data_length < 54) {
+		pr_err("Receive Copy Results Op Parameters length"
+		       " too small: %u\n", se_cmd->data_length);
+		transport_kunmap_data_sg(se_cmd);
+		return TCM_INVALID_CDB_FIELD;
+	}
+	/*
+	 * Set SNLID=1 (Supports no List ID)
+	 */
+	p[4] = 0x1;
+	/*
+	 * MAXIMUM TARGET DESCRIPTOR COUNT
+	 */
+	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
+	/*
+	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
+	 */
+	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
+	/*
+	 * MAXIMUM DESCRIPTOR LIST LENGTH
+	 */
+	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
+	/*
+	 * MAXIMUM SEGMENT LENGTH
+	 */
+	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
+	/*
+	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
+	 */
+	put_unaligned_be32(0x0, &p[20]);
+	/*
+	 * HELD DATA LIMIT
+	 */
+	put_unaligned_be32(0x0, &p[24]);
+	/*
+	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
+	 */
+	put_unaligned_be32(0x0, &p[28]);
+	/*
+	 * TOTAL CONCURRENT COPIES
+	 */
+	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
+	/*
+	 * MAXIMUM CONCURRENT COPIES
+	 */
+	p[36] = RCR_OP_MAX_CONCURR_COPIES;
+	/*
+	 * DATA SEGMENT GRANULARITY (log 2)
+	 */
+	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
+	/*
+	 * INLINE DATA GRANULARITY (log 2)
+	 */
+	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
+	/*
+	 * HELD DATA GRANULARITY (log 2)
+	 */
+	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
+	/*
+	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
+	 */
+	p[43] = 0x2;
+	/*
+	 * List of implemented descriptor type codes (ordered)
+	 */
+	p[44] = 0x02; /* Copy Block to Block device */
+	p[45] = 0xe4; /* Identification descriptor target descriptor */
+
+	/*
+	 * AVAILABLE DATA (n-3): the last byte of returned parameter data
+	 * is at offset n = 45, so report 45 - 3 = 42
+	 */
+	put_unaligned_be32(42, &p[0]);
+
+	transport_kunmap_data_sg(se_cmd);
+	target_complete_cmd(se_cmd, GOOD);
+
+	return TCM_NO_SENSE;
+}
+
+sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
+{
+	unsigned char *cdb = &se_cmd->t_task_cdb[0];
+	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
+	sense_reason_t rc = TCM_NO_SENSE;
+
+	pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
+		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
+
+	if (list_id != 0) {
+		pr_err("Receive Copy Results with non zero list identifier"
+		       " not supported\n");
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	switch (sa) {
+	case RCR_SA_OPERATING_PARAMETERS:
+		rc = target_rcr_operating_parameters(se_cmd);
+		break;
+	case RCR_SA_COPY_STATUS:
+	case RCR_SA_RECEIVE_DATA:
+	case RCR_SA_FAILED_SEGMENT_DETAILS:
+	default:
+		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	return rc;
+}
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
new file mode 100644
index 0000000..700a981
--- /dev/null
+++ b/drivers/target/target_core_xcopy.h
@@ -0,0 +1,62 @@
+#define XCOPY_TARGET_DESC_LEN		32
+#define XCOPY_SEGMENT_DESC_LEN		28
+#define XCOPY_NAA_IEEE_REGEX_LEN	16
+#define XCOPY_MAX_SECTORS		1024
+
+enum xcopy_origin_list {
+	XCOL_SOURCE_RECV_OP = 0x01,
+	XCOL_DEST_RECV_OP = 0x02,
+};
+
+struct xcopy_pt_cmd;
+
+struct xcopy_op {
+	int op_origin;
+
+	struct se_cmd *xop_se_cmd;
+	struct se_device *src_dev;
+	unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+	struct se_device *dst_dev;
+	unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+	unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+
+	sector_t src_lba;
+	sector_t dst_lba;
+	unsigned short stdi;
+	unsigned short dtdi;
+	unsigned short nolb;
+	unsigned int dbl;
+
+	struct xcopy_pt_cmd *src_pt_cmd;
+	struct xcopy_pt_cmd *dst_pt_cmd;
+
+	u32 xop_data_nents;
+	struct scatterlist *xop_data_sg;
+	struct work_struct xop_work;
+};
+
+/*
+ * Receive Copy Results Service Actions
+ */
+#define RCR_SA_COPY_STATUS		0x00
+#define RCR_SA_RECEIVE_DATA		0x01
+#define RCR_SA_OPERATING_PARAMETERS	0x03
+#define RCR_SA_FAILED_SEGMENT_DETAILS	0x04
+
+/*
+ * Receive Copy Results defs for Operating Parameters
+ */
+#define RCR_OP_MAX_TARGET_DESC_COUNT	0x2
+#define RCR_OP_MAX_SG_DESC_COUNT	0x1
+#define RCR_OP_MAX_DESC_LIST_LEN	1024
+#define RCR_OP_MAX_SEGMENT_LEN		268435456 /* 256 MB */
+#define RCR_OP_TOTAL_CONCURR_COPIES	0x1 /* Must be <= 16384 */
+#define RCR_OP_MAX_CONCURR_COPIES	0x1 /* Must be <= 255 */
+#define RCR_OP_DATA_SEG_GRAN_LOG2	9 /* 512 bytes in log 2 */
+#define RCR_OP_INLINE_DATA_GRAN_LOG2	9 /* 512 bytes in log 2 */
+#define RCR_OP_HELD_DATA_GRAN_LOG2	9 /* 512 bytes in log 2 */
+
+extern int target_xcopy_setup_pt(void);
+extern void target_xcopy_release_pt(void);
+extern sense_reason_t target_do_xcopy(struct se_cmd *);
+extern sense_reason_t target_do_receive_copy_results(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/Kconfig b/drivers/target/tcm_fc/Kconfig
new file mode 100644
index 0000000..40caf45
--- /dev/null
+++ b/drivers/target/tcm_fc/Kconfig
@@ -0,0 +1,5 @@
+config TCM_FC
+	tristate "TCM_FC fabric Plugin"
+	depends on LIBFC
+	help
+	Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
new file mode 100644
index 0000000..20b14bb
--- /dev/null
+++ b/drivers/target/tcm_fc/Makefile
@@ -0,0 +1,6 @@
+tcm_fc-y +=		tfc_cmd.o \
+			tfc_conf.o \
+			tfc_io.o \
+			tfc_sess.o
+
+obj-$(CONFIG_TCM_FC)	+= tcm_fc.o
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
new file mode 100644
index 0000000..39909da
--- /dev/null
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __TCM_FC_H__
+#define __TCM_FC_H__
+
+#define FT_VERSION "0.4"
+
+#define FT_NAMELEN 32		/* length of ASCII WWPNs including pad */
+#define FT_TPG_NAMELEN 32	/* max length of TPG name */
+#define FT_LUN_NAMELEN 32	/* max length of LUN name */
+#define TCM_FC_DEFAULT_TAGS 512	/* tags used for per-session preallocation */
+
+struct ft_transport_id {
+	__u8	format;
+	__u8	__resvd1[7];
+	__u8	wwpn[8];
+	__u8	__resvd2[8];
+} __attribute__((__packed__));
+
+/*
+ * Session (remote port).
+ */
+struct ft_sess {
+	u32 port_id;			/* for hash lookup use only */
+	u32 params;
+	u16 max_frame;			/* maximum frame size */
+	u64 port_name;			/* port name for transport ID */
+	struct ft_tport *tport;
+	struct se_session *se_sess;
+	struct hlist_node hash;		/* linkage in ft_sess_hash table */
+	struct rcu_head rcu;
+	struct kref kref;		/* ref for hash and outstanding I/Os */
+};
+
+/*
+ * Hash table of sessions per local port.
+ * Hash lookup by remote port FC_ID.
+ */
+#define	FT_SESS_HASH_BITS	6
+#define	FT_SESS_HASH_SIZE	(1 << FT_SESS_HASH_BITS)
+
+/*
+ * Per local port data.
+ * This is created only after a TPG exists that allows target function
+ * for the local port.  If the TPG exists, this is allocated when
+ * we're notified that the local port has been created, or when
+ * the first PRLI provider callback is received.
+ */
+struct ft_tport {
+	struct fc_lport *lport;
+	struct ft_tpg *tpg;		/* NULL if TPG deleted before tport */
+	u32	sess_count;		/* number of sessions in hash */
+	struct rcu_head rcu;
+	struct hlist_head hash[FT_SESS_HASH_SIZE];	/* list of sessions */
+};
+
+/*
+ * Node ID and authentication.
+ */
+struct ft_node_auth {
+	u64	port_name;
+	u64	node_name;
+};
+
+/*
+ * Node ACL for FC remote port session.
+ */
+struct ft_node_acl {
+	struct se_node_acl se_node_acl;
+	struct ft_node_auth node_auth;
+};
+
+struct ft_lun {
+	u32 index;
+	char name[FT_LUN_NAMELEN];
+};
+
+/*
+ * Target portal group (local port).
+ */
+struct ft_tpg {
+	u32 index;
+	struct ft_lport_wwn *lport_wwn;
+	struct ft_tport *tport;		/* active tport or NULL */
+	struct list_head lun_list;	/* head of LUNs */
+	struct se_portal_group se_tpg;
+	struct workqueue_struct *workqueue;
+};
+
+struct ft_lport_wwn {
+	u64 wwpn;
+	char name[FT_NAMELEN];
+	struct list_head ft_wwn_node;
+	struct ft_tpg *tpg;
+	struct se_wwn se_wwn;
+};
+
+/*
+ * Commands
+ */
+struct ft_cmd {
+	struct ft_sess *sess;		/* session held for cmd */
+	struct fc_seq *seq;		/* sequence in exchange mgr */
+	struct se_cmd se_cmd;		/* Local TCM I/O descriptor */
+	struct fc_frame *req_frame;
+	u32 write_data_len;		/* data received on writes */
+	struct work_struct work;
+	/* Local sense buffer */
+	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
+	u32 was_ddp_setup:1;		/* Set only if ddp is setup */
+	u32 aborted:1;			/* Set if aborted by reset or timeout */
+	struct scatterlist *sg;		/* Set only if DDP is setup */
+	u32 sg_cnt;			/* No. of items in scatterlist */
+};
+
+extern struct mutex ft_lport_lock;
+extern struct fc4_prov ft_prov;
+extern unsigned int ft_debug_logging;
+
+/*
+ * Fabric methods.
+ */
+
+/*
+ * Session ops.
+ */
+void ft_sess_put(struct ft_sess *);
+int ft_sess_shutdown(struct se_session *);
+void ft_sess_close(struct se_session *);
+u32 ft_sess_get_index(struct se_session *);
+u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
+
+void ft_lport_add(struct fc_lport *, void *);
+void ft_lport_del(struct fc_lport *, void *);
+int ft_lport_notify(struct notifier_block *, unsigned long, void *);
+
+/*
+ * IO methods.
+ */
+int ft_check_stop_free(struct se_cmd *);
+void ft_release_cmd(struct se_cmd *);
+int ft_queue_status(struct se_cmd *);
+int ft_queue_data_in(struct se_cmd *);
+int ft_write_pending(struct se_cmd *);
+int ft_write_pending_status(struct se_cmd *);
+int ft_get_cmd_state(struct se_cmd *);
+void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
+
+/*
+ * other internal functions.
+ */
+void ft_recv_req(struct ft_sess *, struct fc_frame *);
+struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
+struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
+
+void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
+void ft_dump_cmd(struct ft_cmd *, const char *caller);
+
+ssize_t ft_format_wwn(char *, size_t, u64);
+
+/*
+ * Underlying HW specific helper function
+ */
+void ft_invl_hw_context(struct ft_cmd *);
+
+#endif /* __TCM_FC_H__ */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
new file mode 100644
index 0000000..064d6df
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* XXX TBD some includes may be extraneous */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/hash.h>
+#include <linux/percpu_ida.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "tcm_fc.h"
+
+/*
+ * Dump cmd state for debugging.
+ */
+static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+{
+	struct fc_exch *ep;
+	struct fc_seq *sp;
+	struct se_cmd *se_cmd;
+	struct scatterlist *sg;
+	int count;
+
+	se_cmd = &cmd->se_cmd;
+	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
+		caller, cmd, cmd->sess, cmd->seq, se_cmd);
+
+	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
+		caller, cmd, se_cmd->t_data_nents,
+		se_cmd->data_length, se_cmd->se_cmd_flags);
+
+	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
+		pr_debug("%s: cmd %p sg %p page %p "
+			"len 0x%x off 0x%x\n",
+			caller, cmd, sg,
+			sg_page(sg), sg->length, sg->offset);
+
+	sp = cmd->seq;
+	if (sp) {
+		ep = fc_seq_exch(sp);
+		pr_debug("%s: cmd %p sid %x did %x "
+			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
+			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
+			sp->id, ep->esb_stat);
+	}
+}
+
+void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+{
+	if (unlikely(ft_debug_logging))
+		_ft_dump_cmd(cmd, caller);
+}
+
+static void ft_free_cmd(struct ft_cmd *cmd)
+{
+	struct fc_frame *fp;
+	struct fc_lport *lport;
+	struct ft_sess *sess;
+
+	if (!cmd)
+		return;
+	sess = cmd->sess;
+	fp = cmd->req_frame;
+	lport = fr_dev(fp);
+	if (fr_seq(fp))
+		lport->tt.seq_release(fr_seq(fp));
+	fc_frame_free(fp);
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ft_sess_put(sess);	/* undo get from lookup at recv */
+}
+
+void ft_release_cmd(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+
+	ft_free_cmd(cmd);
+}
+
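+/*
+ * Called by target-core when command processing completes; free the
+ * command here and return 1 so the core skips its own release path.
+ */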
+int ft_check_stop_free(struct se_cmd *se_cmd)
+{
+	transport_generic_free_cmd(se_cmd, 0);
+	return 1;
+}
+
+/*
+ * Send response.
+ */
+int ft_queue_status(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+	struct fc_frame *fp;
+	struct fcp_resp_with_ext *fcp;
+	struct fc_lport *lport;
+	struct fc_exch *ep;
+	size_t len;
+	int rc;
+
+	if (cmd->aborted)
+		return 0;
+	ft_dump_cmd(cmd, __func__);
+	ep = fc_seq_exch(cmd->seq);
+	lport = ep->lp;
+	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
+	fp = fc_frame_alloc(lport, len);
+	if (!fp) {
+		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+		return -ENOMEM;
+	}
+
+	fcp = fc_frame_payload_get(fp, len);
+	memset(fcp, 0, len);
+	fcp->resp.fr_status = se_cmd->scsi_status;
+
+	len = se_cmd->scsi_sense_length;
+	if (len) {
+		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
+		fcp->ext.fr_sns_len = htonl(len);
+		memcpy((fcp + 1), se_cmd->sense_buffer, len);
+	}
+
+	/*
+	 * Test underflow and overflow with one mask.  Usually both are off.
+	 * Bidirectional commands are not handled yet.
+	 */
+	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
+		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+			fcp->resp.fr_flags |= FCP_RESID_OVER;
+		else
+			fcp->resp.fr_flags |= FCP_RESID_UNDER;
+		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
+	}
+
+	/*
+	 * Send response.
+	 */
+	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
+		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
+
+	rc = lport->tt.seq_send(lport, cmd->seq, fp);
+	if (rc) {
+		pr_info_ratelimited("%s: Failed to send response frame %p, "
+				    "xid <0x%x>\n", __func__, fp, ep->xid);
+		/*
+		 * Generate a TASK_SET_FULL status to notify the initiator
+		 * to reduce its queue_depth after the se_cmd response has
+		 * been re-queued by target-core.
+		 */
+		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+		return -ENOMEM;
+	}
+	lport->tt.exch_done(cmd->seq);
+	return 0;
+}
+
+int ft_write_pending_status(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+
+	return cmd->write_data_len != se_cmd->data_length;
+}
+
+/*
+ * Send TX_RDY (transfer ready).
+ */
+int ft_write_pending(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+	struct fc_frame *fp;
+	struct fcp_txrdy *txrdy;
+	struct fc_lport *lport;
+	struct fc_exch *ep;
+	struct fc_frame_header *fh;
+	u32 f_ctl;
+
+	ft_dump_cmd(cmd, __func__);
+
+	if (cmd->aborted)
+		return 0;
+	ep = fc_seq_exch(cmd->seq);
+	lport = ep->lp;
+	fp = fc_frame_alloc(lport, sizeof(*txrdy));
+	if (!fp)
+		return -ENOMEM; /* Signal QUEUE_FULL */
+
+	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
+	memset(txrdy, 0, sizeof(*txrdy));
+	txrdy->ft_burst_len = htonl(se_cmd->data_length);
+
+	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
+		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+	fh = fc_frame_header_get(fp);
+	f_ctl = ntoh24(fh->fh_f_ctl);
+
+	/* Only if it is 'Exchange Responder' */
+	if (f_ctl & FC_FC_EX_CTX) {
+		/* The target is the 'exchange responder' and is sending
+		 * XFER_READY to the 'exchange initiator'.
+		 */
+		if ((ep->xid <= lport->lro_xid) &&
+		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
+			if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
+			    lport->tt.ddp_target(lport, ep->xid,
+						 se_cmd->t_data_sg,
+						 se_cmd->t_data_nents))
+				cmd->was_ddp_setup = 1;
+		}
+	}
+	lport->tt.seq_send(lport, cmd->seq, fp);
+	return 0;
+}
+
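+/*
+ * No fabric-specific command state is tracked; always report 0.
+ */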
+int ft_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+/*
+ * FC sequence response handler for follow-on sequences (data) and aborts.
+ */
+static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	struct ft_cmd *cmd = arg;
+	struct fc_frame_header *fh;
+
+	if (IS_ERR(fp)) {
+		/* XXX need to find cmd if queued */
+		cmd->seq = NULL;
+		cmd->aborted = true;
+		return;
+	}
+
+	fh = fc_frame_header_get(fp);
+
+	switch (fh->fh_r_ctl) {
+	case FC_RCTL_DD_SOL_DATA:	/* write data */
+		ft_recv_write_data(cmd, fp);
+		break;
+	case FC_RCTL_DD_UNSOL_CTL:	/* command */
+	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
+	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
+	default:
+		pr_debug("%s: unhandled frame r_ctl %x\n",
+		       __func__, fh->fh_r_ctl);
+		ft_invl_hw_context(cmd);
+		fc_frame_free(fp);
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
+		break;
+	}
+}
+
+/*
+ * Send a FCP response including SCSI status and optional FCP rsp_code.
+ * status is SAM_STAT_GOOD (zero) iff code is valid.
+ * This is used in error cases, such as allocation failures.
+ */
+static void ft_send_resp_status(struct fc_lport *lport,
+				const struct fc_frame *rx_fp,
+				u32 status, enum fcp_resp_rsp_codes code)
+{
+	struct fc_frame *fp;
+	struct fc_seq *sp;
+	const struct fc_frame_header *fh;
+	size_t len;
+	struct fcp_resp_with_ext *fcp;
+	struct fcp_resp_rsp_info *info;
+
+	fh = fc_frame_header_get(rx_fp);
+	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
+		  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
+	len = sizeof(*fcp);
+	if (status == SAM_STAT_GOOD)
+		len += sizeof(*info);
+	fp = fc_frame_alloc(lport, len);
+	if (!fp)
+		return;
+	fcp = fc_frame_payload_get(fp, len);
+	memset(fcp, 0, len);
+	fcp->resp.fr_status = status;
+	if (status == SAM_STAT_GOOD) {
+		fcp->ext.fr_rsp_len = htonl(sizeof(*info));
+		fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
+		info = (struct fcp_resp_rsp_info *)(fcp + 1);
+		info->rsp_code = code;
+	}
+
+	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
+	sp = fr_seq(fp);
+	if (sp) {
+		lport->tt.seq_send(lport, sp, fp);
+		lport->tt.exch_done(sp);
+	} else {
+		lport->tt.frame_send(lport, fp);
+	}
+}
+
+/*
+ * Send error or task management response.
+ */
+static void ft_send_resp_code(struct ft_cmd *cmd,
+			      enum fcp_resp_rsp_codes code)
+{
+	ft_send_resp_status(cmd->sess->tport->lport,
+			    cmd->req_frame, SAM_STAT_GOOD, code);
+}
+
+
+/*
+ * Send error or task management response.
+ * Always frees the cmd and associated state.
+ */
+static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
+				      enum fcp_resp_rsp_codes code)
+{
+	ft_send_resp_code(cmd, code);
+	ft_free_cmd(cmd);
+}
+
+/*
+ * Handle Task Management Request.
+ */
+static void ft_send_tm(struct ft_cmd *cmd)
+{
+	struct fcp_cmnd *fcp;
+	int rc;
+	u8 tm_func;
+
+	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
+
+	switch (fcp->fc_tm_flags) {
+	case FCP_TMF_LUN_RESET:
+		tm_func = TMR_LUN_RESET;
+		break;
+	case FCP_TMF_TGT_RESET:
+		tm_func = TMR_TARGET_WARM_RESET;
+		break;
+	case FCP_TMF_CLR_TASK_SET:
+		tm_func = TMR_CLEAR_TASK_SET;
+		break;
+	case FCP_TMF_ABT_TASK_SET:
+		tm_func = TMR_ABORT_TASK_SET;
+		break;
+	case FCP_TMF_CLR_ACA:
+		tm_func = TMR_CLEAR_ACA;
+		break;
+	default:
+		/*
+		 * FCP-4 r01 indicates that having a combination of
+		 * tm_flags set is invalid.
+		 */
+		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
+		ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
+		return;
+	}
+
+	/* FIXME: Add referenced task tag for ABORT_TASK */
+	rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
+		&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
+		cmd, tm_func, GFP_KERNEL, 0, 0);
+	if (rc < 0)
+		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
+}
+
+/*
+ * Send status from completed task management request.
+ */
+void ft_queue_tm_resp(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
+	enum fcp_resp_rsp_codes code;
+
+	if (cmd->aborted)
+		return;
+	switch (tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		code = FCP_TMF_CMPL;
+		break;
+	case TMR_LUN_DOES_NOT_EXIST:
+		code = FCP_TMF_INVALID_LUN;
+		break;
+	case TMR_FUNCTION_REJECTED:
+		code = FCP_TMF_REJECTED;
+		break;
+	case TMR_TASK_DOES_NOT_EXIST:
+	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+	default:
+		code = FCP_TMF_FAILED;
+		break;
+	}
+	pr_debug("tmr fn %d resp %d fcp code %d\n",
+		  tmr->function, tmr->response, code);
+	ft_send_resp_code(cmd, code);
+}
+
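+/*
+ * Nothing to do here; an aborted command is cleaned up through the
+ * normal release path.
+ */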
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
+static void ft_send_work(struct work_struct *work);
+
+/*
+ * Handle incoming FCP command.
+ */
+static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
+{
+	struct ft_cmd *cmd;
+	struct fc_lport *lport = sess->tport->lport;
+	struct se_session *se_sess = sess->se_sess;
+	int tag;
+
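+	/*
+	 * Commands are pre-allocated in the session's tag pool; take a free
+	 * slot instead of allocating per-command memory here.
+	 */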
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
+		goto busy;
+
+	cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
+	memset(cmd, 0, sizeof(struct ft_cmd));
+
+	cmd->se_cmd.map_tag = tag;
+	cmd->sess = sess;
+	cmd->seq = lport->tt.seq_assign(lport, fp);
+	if (!cmd->seq) {
+		percpu_ida_free(&se_sess->sess_tag_pool, tag);
+		goto busy;
+	}
+	cmd->req_frame = fp;		/* hold frame during cmd */
+
+	INIT_WORK(&cmd->work, ft_send_work);
+	queue_work(sess->tport->tpg->workqueue, &cmd->work);
+	return;
+
+busy:
+	pr_debug("cmd or seq allocation failure - sending BUSY\n");
+	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
+	fc_frame_free(fp);
+	ft_sess_put(sess);		/* undo get from lookup */
+}
+
+
+/*
+ * Handle incoming FCP frame.
+ * Caller has verified that the frame is type FCP.
+ */
+void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+	switch (fh->fh_r_ctl) {
+	case FC_RCTL_DD_UNSOL_CMD:	/* command */
+		ft_recv_cmd(sess, fp);
+		break;
+	case FC_RCTL_DD_SOL_DATA:	/* write data */
+	case FC_RCTL_DD_UNSOL_CTL:
+	case FC_RCTL_DD_SOL_CTL:
+	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
+	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
+	default:
+		pr_debug("%s: unhandled frame r_ctl %x\n",
+		       __func__, fh->fh_r_ctl);
+		fc_frame_free(fp);
+		ft_sess_put(sess);	/* undo get from lookup */
+		break;
+	}
+}
+
+/*
+ * Send new command to target.
+ */
+static void ft_send_work(struct work_struct *work)
+{
+	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
+	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
+	struct fcp_cmnd *fcp;
+	int data_dir = 0;
+	int task_attr;
+
+	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
+	if (!fcp)
+		goto err;
+
+	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
+		goto err;		/* not handling longer CDBs yet */
+
+	/*
+	 * Check for FCP task management flags
+	 */
+	if (fcp->fc_tm_flags) {
+		ft_send_tm(cmd);
+		return;
+	}
+
+	switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
+	case 0:
+		data_dir = DMA_NONE;
+		break;
+	case FCP_CFL_RDDATA:
+		data_dir = DMA_FROM_DEVICE;
+		break;
+	case FCP_CFL_WRDATA:
+		data_dir = DMA_TO_DEVICE;
+		break;
+	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
+		goto err;	/* TBD not supported by tcm_fc yet */
+	}
+	/*
+	 * Locate the SAM Task Attr from fc_pri_ta
+	 */
+	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
+	case FCP_PTA_HEADQ:
+		task_attr = TCM_HEAD_TAG;
+		break;
+	case FCP_PTA_ORDERED:
+		task_attr = TCM_ORDERED_TAG;
+		break;
+	case FCP_PTA_ACA:
+		task_attr = TCM_ACA_TAG;
+		break;
+	case FCP_PTA_SIMPLE: /* Fallthrough */
+	default:
+		task_attr = TCM_SIMPLE_TAG;
+	}
+
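+	/* Route follow-on frames (write data, aborts) to ft_recv_seq(). */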
+	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+	cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
+	/*
+	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
+	 * directly from ft_check_stop_free callback in response path.
+	 */
+	if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
+			      &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
+			      ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+		goto err;
+
+	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+	return;
+
+err:
+	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
+}
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
new file mode 100644
index 0000000..85aeaa0
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -0,0 +1,513 @@
+/*******************************************************************************
+ * Filename:  tfc_conf.c
+ *
+ * This file contains the configfs implementation for TCM_fc fabric node.
+ * Based on tcm_loop_configfs.c
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ * Copyright (c) 2009,2010 Rising Tide, Inc.
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/libfc.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "tcm_fc.h"
+
+static LIST_HEAD(ft_wwn_list);
+DEFINE_MUTEX(ft_lport_lock);
+
+unsigned int ft_debug_logging;
+module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+	const char *cp;
+	char c;
+	u32 byte = 0;
+	u32 pos = 0;
+	u32 err;
+	int val;
+
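+	/*
+	 * err codes in the debug message below: 1 = missing ':' separator,
+	 * 2 = wrong length, 3 = invalid hex digit, 4 = name too long.
+	 */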
+	*wwn = 0;
+	for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
+		c = *cp;
+		if (c == '\n' && cp[1] == '\0')
+			continue;
+		if (strict && pos++ == 2 && byte++ < 7) {
+			pos = 0;
+			if (c == ':')
+				continue;
+			err = 1;
+			goto fail;
+		}
+		if (c == '\0') {
+			err = 2;
+			if (strict && byte != 8)
+				goto fail;
+			return cp - name;
+		}
+		err = 3;
+		val = hex_to_bin(c);
+		if (val < 0 || (strict && isupper(c)))
+			goto fail;
+		*wwn = (*wwn << 4) | val;
+	}
+	err = 4;
+fail:
+	pr_debug("err %u len %zu pos %u byte %u\n",
+		    err, cp - name, pos, byte);
+	return -1;
+}
+
+ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
+{
+	u8 b[8];
+
+	put_unaligned_be64(wwn, b);
+	return snprintf(buf, len,
+		 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+		 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static ssize_t ft_wwn_show(void *arg, char *buf)
+{
+	u64 *wwn = arg;
+	ssize_t len;
+
+	len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
+	buf[len++] = '\n';
+	return len;
+}
+
+static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
+{
+	ssize_t ret;
+	u64 wwn;
+
+	ret = ft_parse_wwn(buf, &wwn, 0);
+	if (ret > 0)
+		*(u64 *)arg = wwn;
+	return ret;
+}
+
+/*
+ * ACL auth ops.
+ */
+
+static ssize_t ft_nacl_port_name_show(struct config_item *item, char *page)
+{
+	struct se_node_acl *se_nacl = acl_to_nacl(item);
+	struct ft_node_acl *acl = container_of(se_nacl,
+			struct ft_node_acl, se_node_acl);
+
+	return ft_wwn_show(&acl->node_auth.port_name, page);
+}
+
+static ssize_t ft_nacl_port_name_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_node_acl *se_nacl = acl_to_nacl(item);
+	struct ft_node_acl *acl = container_of(se_nacl,
+			struct ft_node_acl, se_node_acl);
+
+	return ft_wwn_store(&acl->node_auth.port_name, page, count);
+}
+
+static ssize_t ft_nacl_node_name_show(struct config_item *item,
+		char *page)
+{
+	struct se_node_acl *se_nacl = acl_to_nacl(item);
+	struct ft_node_acl *acl = container_of(se_nacl,
+			struct ft_node_acl, se_node_acl);
+
+	return ft_wwn_show(&acl->node_auth.node_name, page);
+}
+
+static ssize_t ft_nacl_node_name_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_node_acl *se_nacl = acl_to_nacl(item);
+	struct ft_node_acl *acl = container_of(se_nacl,
+			struct ft_node_acl, se_node_acl);
+
+	return ft_wwn_store(&acl->node_auth.node_name, page, count);
+}
+
+CONFIGFS_ATTR(ft_nacl_, node_name);
+CONFIGFS_ATTR(ft_nacl_, port_name);
+
+static struct configfs_attribute *ft_nacl_base_attrs[] = {
+	&ft_nacl_attr_port_name,
+	&ft_nacl_attr_node_name,
+	NULL,
+};
+
+/*
+ * ACL ops.
+ */
+
+/*
+ * Add ACL for an initiator.  The ACL is named arbitrarily.
+ * The port_name and/or node_name are attributes.
+ */
+static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
+{
+	struct ft_node_acl *acl =
+		container_of(nacl, struct ft_node_acl, se_node_acl);
+	u64 wwpn;
+
+	if (ft_parse_wwn(name, &wwpn, 1) < 0)
+		return -EINVAL;
+
+	acl->node_auth.port_name = wwpn;
+	return 0;
+}
+
+struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
+{
+	struct ft_node_acl *found = NULL;
+	struct ft_node_acl *acl;
+	struct se_portal_group *se_tpg = &tpg->se_tpg;
+	struct se_node_acl *se_acl;
+
+	mutex_lock(&se_tpg->acl_node_mutex);
+	list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
+		acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
+		pr_debug("acl %p port_name %llx\n",
+			acl, (unsigned long long)acl->node_auth.port_name);
+		if (acl->node_auth.port_name == rdata->ids.port_name ||
+		    acl->node_auth.node_name == rdata->ids.node_name) {
+			pr_debug("acl %p port_name %llx matched\n", acl,
+				    (unsigned long long)rdata->ids.port_name);
+			found = acl;
+			/* XXX need to hold onto ACL */
+			break;
+		}
+	}
+	mutex_unlock(&se_tpg->acl_node_mutex);
+	return found;
+}
+
+/*
+ * local_port port_group (tpg) ops.
+ */
+static struct se_portal_group *ft_add_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct ft_lport_wwn *ft_wwn;
+	struct ft_tpg *tpg;
+	struct workqueue_struct *wq;
+	unsigned long index;
+	int ret;
+
+	pr_debug("tcm_fc: add tpg %s\n", name);
+
+	/*
+	 * Name must be "tpgt_" followed by the index.
+	 */
+	if (strstr(name, "tpgt_") != name)
+		return NULL;
+
+	ret = kstrtoul(name + 5, 10, &index);
+	if (ret)
+		return NULL;
+	if (index > UINT_MAX)
+		return NULL;
+
+	if (index != 1) {
+		pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
+	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+	if (!tpg)
+		return NULL;
+	tpg->index = index;
+	tpg->lport_wwn = ft_wwn;
+	INIT_LIST_HEAD(&tpg->lun_list);
+
+	wq = alloc_workqueue("tcm_fc", 0, 1);
+	if (!wq) {
+		kfree(tpg);
+		return NULL;
+	}
+
+	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
+	if (ret < 0) {
+		destroy_workqueue(wq);
+		kfree(tpg);
+		return NULL;
+	}
+	tpg->workqueue = wq;
+
+	mutex_lock(&ft_lport_lock);
+	ft_wwn->tpg = tpg;
+	mutex_unlock(&ft_lport_lock);
+
+	return &tpg->se_tpg;
+}
+
+static void ft_del_tpg(struct se_portal_group *se_tpg)
+{
+	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+	struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
+
+	pr_debug("del tpg %s\n",
+		    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
+
+	destroy_workqueue(tpg->workqueue);
+
+	/* Wait for sessions to be freed thru RCU before deregistering the tpg */
+	synchronize_rcu();
+
+	mutex_lock(&ft_lport_lock);
+	ft_wwn->tpg = NULL;
+	if (tpg->tport) {
+		tpg->tport->tpg = NULL;
+		tpg->tport = NULL;
+	}
+	mutex_unlock(&ft_lport_lock);
+
+	core_tpg_deregister(se_tpg);
+	kfree(tpg);
+}
+
+/*
+ * Verify that an lport is configured to use the tcm_fc module, and return
+ * the target port group that should be used.
+ *
+ * The caller holds ft_lport_lock.
+ */
+struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
+{
+	struct ft_lport_wwn *ft_wwn;
+
+	list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (ft_wwn->wwpn == lport->wwpn)
+			return ft_wwn->tpg;
+	}
+	return NULL;
+}
+
+/*
+ * target config instance ops.
+ */
+
+/*
+ * Add lport to allowed config.
+ * The name is the WWPN in lower-case ASCII, colon-separated bytes.
+ */
+static struct se_wwn *ft_add_wwn(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct ft_lport_wwn *ft_wwn;
+	struct ft_lport_wwn *old_ft_wwn;
+	u64 wwpn;
+
+	pr_debug("add wwn %s\n", name);
+	if (ft_parse_wwn(name, &wwpn, 1) < 0)
+		return NULL;
+	ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+	if (!ft_wwn)
+		return NULL;
+	ft_wwn->wwpn = wwpn;
+
+	mutex_lock(&ft_lport_lock);
+	list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (old_ft_wwn->wwpn == wwpn) {
+			mutex_unlock(&ft_lport_lock);
+			kfree(ft_wwn);
+			return NULL;
+		}
+	}
+	list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+	ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
+	mutex_unlock(&ft_lport_lock);
+
+	return &ft_wwn->se_wwn;
+}
+
+static void ft_del_wwn(struct se_wwn *wwn)
+{
+	struct ft_lport_wwn *ft_wwn = container_of(wwn,
+				struct ft_lport_wwn, se_wwn);
+
+	pr_debug("del wwn %s\n", ft_wwn->name);
+	mutex_lock(&ft_lport_lock);
+	list_del(&ft_wwn->ft_wwn_node);
+	mutex_unlock(&ft_lport_lock);
+
+	kfree(ft_wwn);
+}
+
+static ssize_t ft_wwn_version_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
+		""UTS_RELEASE"\n",  utsname()->sysname, utsname()->machine);
+}
+
+CONFIGFS_ATTR_RO(ft_wwn_, version);
+
+static struct configfs_attribute *ft_wwn_attrs[] = {
+	&ft_wwn_attr_version,
+	NULL,
+};
+
+static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg)
+{
+	return container_of(se_tpg, struct ft_tpg, se_tpg);
+}
+
+static char *ft_get_fabric_name(void)
+{
+	return "fc";
+}
+
+static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	return ft_tpg(se_tpg)->lport_wwn->name;
+}
+
+static u16 ft_get_tag(struct se_portal_group *se_tpg)
+{
+	/*
+	 * This tag is used when forming the SCSI Name identifier in the
+	 * EVPD=1 INQUIRY (page 0x83) response to represent the SCSI
+	 * Target Port.
+	 */
+	return ft_tpg(se_tpg)->index;
+}
+
+static int ft_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
+{
+}
+
+static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return ft_tpg(se_tpg)->index;
+}
+
+static const struct target_core_fabric_ops ft_fabric_ops = {
+	.module =			THIS_MODULE,
+	.name =				"fc",
+	.node_acl_size =		sizeof(struct ft_node_acl),
+	.get_fabric_name =		ft_get_fabric_name,
+	.tpg_get_wwn =			ft_get_fabric_wwn,
+	.tpg_get_tag =			ft_get_tag,
+	.tpg_check_demo_mode =		ft_check_false,
+	.tpg_check_demo_mode_cache =	ft_check_false,
+	.tpg_check_demo_mode_write_protect = ft_check_false,
+	.tpg_check_prod_mode_write_protect = ft_check_false,
+	.tpg_get_inst_index =		ft_tpg_get_inst_index,
+	.check_stop_free =		ft_check_stop_free,
+	.release_cmd =			ft_release_cmd,
+	.shutdown_session =		ft_sess_shutdown,
+	.close_session =		ft_sess_close,
+	.sess_get_index =		ft_sess_get_index,
+	.sess_get_initiator_sid =	NULL,
+	.write_pending =		ft_write_pending,
+	.write_pending_status =		ft_write_pending_status,
+	.set_default_node_attributes =	ft_set_default_node_attr,
+	.get_cmd_state =		ft_get_cmd_state,
+	.queue_data_in =		ft_queue_data_in,
+	.queue_status =			ft_queue_status,
+	.queue_tm_rsp =			ft_queue_tm_resp,
+	.aborted_task =			ft_aborted_task,
+	/*
+	 * Setup function pointers for generic logic in
+	 * target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn =		&ft_add_wwn,
+	.fabric_drop_wwn =		&ft_del_wwn,
+	.fabric_make_tpg =		&ft_add_tpg,
+	.fabric_drop_tpg =		&ft_del_tpg,
+	.fabric_init_nodeacl =		&ft_init_nodeacl,
+
+	.tfc_wwn_attrs			= ft_wwn_attrs,
+	.tfc_tpg_nacl_base_attrs	= ft_nacl_base_attrs,
+};
+
+static struct notifier_block ft_notifier = {
+	.notifier_call = ft_lport_notify
+};
+
+static int __init ft_init(void)
+{
+	int ret;
+
+	ret = target_register_template(&ft_fabric_ops);
+	if (ret)
+		goto out;
+
+	ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
+	if (ret)
+		goto out_unregister_template;
+
+	blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
+	fc_lport_iterate(ft_lport_add, NULL);
+	return 0;
+
+out_unregister_template:
+	target_unregister_template(&ft_fabric_ops);
+out:
+	return ret;
+}
+
+static void __exit ft_exit(void)
+{
+	blocking_notifier_chain_unregister(&fc_lport_notifier_head,
+					   &ft_notifier);
+	fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
+	fc_lport_iterate(ft_lport_del, NULL);
+	target_unregister_template(&ft_fabric_ops);
+	synchronize_rcu();
+}
+
+MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
+MODULE_LICENSE("GPL");
+module_init(ft_init);
+module_exit(ft_exit);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
new file mode 100644
index 0000000..847c1aa
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
+ * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
+ *
+ * Copyright (c) 2007 Intel Corporation. All rights reserved.
+ * Copyright (c) 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2008 Mike Christie
+ * Copyright (c) 2009 Rising Tide, Inc.
+ * Copyright (c) 2009 Linux-iSCSI.org
+ * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* XXX TBD some includes may be extraneous */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/hash.h>
+#include <linux/ratelimit.h>
+#include <asm/unaligned.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "tcm_fc.h"
+
+/*
+ * Deliver read data back to initiator.
+ * XXX TBD handle resource problems later.
+ */
+int ft_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+	struct fc_frame *fp = NULL;
+	struct fc_exch *ep;
+	struct fc_lport *lport;
+	struct scatterlist *sg = NULL;
+	size_t remaining;
+	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
+	u32 mem_off = 0;
+	u32 fh_off = 0;
+	u32 frame_off = 0;
+	size_t frame_len = 0;
+	size_t mem_len = 0;
+	size_t tlen;
+	size_t off_in_page;
+	struct page *page = NULL;
+	int use_sg;
+	int error;
+	void *page_addr;
+	void *from;
+	void *to = NULL;
+
+	if (cmd->aborted)
+		return 0;
+
+	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
+		goto queue_status;
+
+	ep = fc_seq_exch(cmd->seq);
+	lport = ep->lp;
+	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+
+	remaining = se_cmd->data_length;
+
+	/*
+	 * Setup to use first mem list entry, unless no data.
+	 */
+	BUG_ON(remaining && !se_cmd->t_data_sg);
+	if (remaining) {
+		sg = se_cmd->t_data_sg;
+		mem_len = sg->length;
+		mem_off = sg->offset;
+		page = sg_page(sg);
+	}
+
+	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
+	use_sg = !(remaining % 4);
+
+	while (remaining) {
+		struct fc_seq *seq = cmd->seq;
+
+		if (!seq) {
+			pr_debug("%s: Command aborted, xid 0x%x\n",
+				 __func__, ep->xid);
+			break;
+		}
+		if (!mem_len) {
+			sg = sg_next(sg);
+			mem_len = min((size_t)sg->length, remaining);
+			mem_off = sg->offset;
+			page = sg_page(sg);
+		}
+		if (!frame_len) {
+			/*
+			 * If the lport has the Large Send Offload (LSO)
+			 * capability, allow 'frame_len' to be as large as
+			 * 'lso_max'; otherwise limit it to the session's
+			 * max frame size.
+			 */
+			frame_len = (lport->seq_offload) ? lport->lso_max :
+							  cmd->sess->max_frame;
+			frame_len = min(frame_len, remaining);
+			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
+			if (!fp)
+				return -ENOMEM;
+			to = fc_frame_payload_get(fp, 0);
+			fh_off = frame_off;
+			frame_off += frame_len;
+			/*
+			 * Set the frame's max payload, which the base driver
+			 * uses to tell the HW the max frame size so that the
+			 * HW can fragment appropriately based on the
+			 * underlying netdev's "gso_max_size".
+			 */
+			fr_max_payload(fp) = cmd->sess->max_frame;
+		}
+		tlen = min(mem_len, frame_len);
+
+		if (use_sg) {
+			off_in_page = mem_off;
+			BUG_ON(!page);
+			get_page(page);
+			skb_fill_page_desc(fp_skb(fp),
+					   skb_shinfo(fp_skb(fp))->nr_frags,
+					   page, off_in_page, tlen);
+			fr_len(fp) += tlen;
+			fp_skb(fp)->data_len += tlen;
+			fp_skb(fp)->truesize +=
+					PAGE_SIZE << compound_order(page);
+		} else {
+			BUG_ON(!page);
+			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
+			page_addr = from;
+			from += mem_off & ~PAGE_MASK;
+			tlen = min(tlen, (size_t)(PAGE_SIZE -
+						(mem_off & ~PAGE_MASK)));
+			memcpy(to, from, tlen);
+			kunmap_atomic(page_addr);
+			to += tlen;
+		}
+
+		mem_off += tlen;
+		mem_len -= tlen;
+		frame_len -= tlen;
+		remaining -= tlen;
+
+		if (frame_len &&
+		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
+			continue;
+		if (!remaining)
+			f_ctl |= FC_FC_END_SEQ;
+		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
+			       FC_TYPE_FCP, f_ctl, fh_off);
+		error = lport->tt.seq_send(lport, seq, fp);
+		if (error) {
+			pr_info_ratelimited("%s: Failed to send frame %p, "
+						"xid <0x%x>, remaining %zu, "
+						"lso_max <0x%x>\n",
+						__func__, fp, ep->xid,
+						remaining, lport->lso_max);
+			/*
+			 * Go ahead and set TASK_SET_FULL status ignoring the
+			 * rest of the DataIN, and immediately attempt to
+			 * send the response via ft_queue_status() in order
+			 * to notify the initiator that it should reduce its
+			 * per-LUN queue_depth.
+			 */
+			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+			break;
+		}
+	}
+queue_status:
+	return ft_queue_status(se_cmd);
+}
+
+static void ft_execute_work(struct work_struct *work)
+{
+	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
+
+	target_execute_cmd(&cmd->se_cmd);
+}
+
+/*
+ * Receive write data frame.
+ */
+void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct fc_seq *seq = cmd->seq;
+	struct fc_exch *ep;
+	struct fc_lport *lport;
+	struct fc_frame_header *fh;
+	struct scatterlist *sg = NULL;
+	u32 mem_off = 0;
+	u32 rel_off;
+	size_t frame_len;
+	size_t mem_len = 0;
+	size_t tlen;
+	struct page *page = NULL;
+	void *page_addr;
+	void *from;
+	void *to;
+	u32 f_ctl;
+	void *buf;
+
+	fh = fc_frame_header_get(fp);
+	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
+		goto drop;
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	ep = fc_seq_exch(seq);
+	lport = ep->lp;
+	if (cmd->was_ddp_setup) {
+		BUG_ON(!ep);
+		BUG_ON(!lport);
+		/*
+		 * Since DDP (Large Rx offload) was setup for this request,
+		 * payload is expected to be copied directly to user buffers.
+		 */
+		buf = fc_frame_payload_get(fp, 1);
+		if (buf)
+			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+				"cmd->sg_cnt 0x%x. DDP was setup"
+				" hence not expected to receive frame with "
+				"payload, Frame will be dropped if"
+				"'Sequence Initiative' bit in f_ctl is"
+				"not set\n", __func__, ep->xid, f_ctl,
+				se_cmd->t_data_sg, se_cmd->t_data_nents);
+		/*
+		 * Invalidate the HW DDP context if it was set up for this
+		 * command. Invalidation is required in both the success and
+		 * error cases.
+		 */
+		ft_invl_hw_context(cmd);
+
+		/*
+		 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
+		 * write data frame is received successfully where payload is
+		 * posted directly to user buffer and only the last frame's
+		 * header is posted in receive queue.
+		 *
+		 * If "Sequence Initiative (TSI)" bit is not set, means error
+		 * condition w.r.t. DDP, hence drop the packet and let explict
+		 * ABORTS from other end of exchange timer trigger the recovery.
+		 */
+		if (f_ctl & FC_FC_SEQ_INIT)
+			goto last_frame;
+		else
+			goto drop;
+	}
+
+	rel_off = ntohl(fh->fh_parm_offset);
+	frame_len = fr_len(fp);
+	if (frame_len <= sizeof(*fh))
+		goto drop;
+	frame_len -= sizeof(*fh);
+	from = fc_frame_payload_get(fp, 0);
+	if (rel_off >= se_cmd->data_length)
+		goto drop;
+	if (frame_len + rel_off > se_cmd->data_length)
+		frame_len = se_cmd->data_length - rel_off;
+
+	/*
+	 * Setup to use first mem list entry, unless no data.
+	 */
+	BUG_ON(frame_len && !se_cmd->t_data_sg);
+	if (frame_len) {
+		sg = se_cmd->t_data_sg;
+		mem_len = sg->length;
+		mem_off = sg->offset;
+		page = sg_page(sg);
+	}
+
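+	/*
+	 * Walk the scatterlist, skipping rel_off bytes into the buffer, and
+	 * copy the frame payload into the command's data pages.
+	 */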
+	while (frame_len) {
+		if (!mem_len) {
+			sg = sg_next(sg);
+			mem_len = sg->length;
+			mem_off = sg->offset;
+			page = sg_page(sg);
+		}
+		if (rel_off >= mem_len) {
+			rel_off -= mem_len;
+			mem_len = 0;
+			continue;
+		}
+		mem_off += rel_off;
+		mem_len -= rel_off;
+		rel_off = 0;
+
+		tlen = min(mem_len, frame_len);
+
+		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
+		page_addr = to;
+		to += mem_off & ~PAGE_MASK;
+		tlen = min(tlen, (size_t)(PAGE_SIZE -
+					  (mem_off & ~PAGE_MASK)));
+		memcpy(to, from, tlen);
+		kunmap_atomic(page_addr);
+
+		from += tlen;
+		frame_len -= tlen;
+		mem_off += tlen;
+		mem_len -= tlen;
+		cmd->write_data_len += tlen;
+	}
+last_frame:
+	if (cmd->write_data_len == se_cmd->data_length) {
+		INIT_WORK(&cmd->work, ft_execute_work);
+		queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
+	}
+drop:
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle and cleanup any HW specific resources if
+ * received ABORTS, errors, timeouts.
+ */
+void ft_invl_hw_context(struct ft_cmd *cmd)
+{
+	struct fc_seq *seq;
+	struct fc_exch *ep = NULL;
+	struct fc_lport *lport = NULL;
+
+	BUG_ON(!cmd);
+	seq = cmd->seq;
+
+	/* Cleanup the DDP context in HW if DDP was setup */
+	if (cmd->was_ddp_setup && seq) {
+		ep = fc_seq_exch(seq);
+		if (ep) {
+			lport = ep->lp;
+			if (lport && (ep->xid <= lport->lro_xid)) {
+				/*
+				 * "ddp_done" trigger invalidation of HW
+				 * specific DDP context
+				 */
+				cmd->write_data_len = lport->tt.ddp_done(lport,
+								      ep->xid);
+
+				/*
+				 * Clear the flag to record that the HW DDP
+				 * context has been invalidated, avoiding
+				 * re-invalidation of the same context (the
+				 * context is identified by ep->xid)
+				 */
+				cmd->was_ddp_setup = 0;
+			}
+		}
+	}
+}
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
new file mode 100644
index 0000000..7b934ea
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* XXX TBD some includes may be extraneous */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/hash.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <linux/kref.h>
+#include <asm/unaligned.h>
+#include <scsi/libfc.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "tcm_fc.h"
+
+static void ft_sess_delete_all(struct ft_tport *);
+
+/*
+ * Lookup or allocate target local port.
+ * Caller holds ft_lport_lock.
+ */
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
+{
+	struct ft_tpg *tpg;
+	struct ft_tport *tport;
+	int i;
+
+	tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+					  lockdep_is_held(&ft_lport_lock));
+	if (tport && tport->tpg)
+		return tport;
+
+	tpg = ft_lport_find_tpg(lport);
+	if (!tpg)
+		return NULL;
+
+	if (tport) {
+		tport->tpg = tpg;
+		tpg->tport = tport;
+		return tport;
+	}
+
+	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
+	if (!tport)
+		return NULL;
+
+	tport->lport = lport;
+	tport->tpg = tpg;
+	tpg->tport = tport;
+	for (i = 0; i < FT_SESS_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&tport->hash[i]);
+
+	rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
+	return tport;
+}
+
+/*
+ * Delete a target local port.
+ * Caller holds ft_lport_lock.
+ */
+static void ft_tport_delete(struct ft_tport *tport)
+{
+	struct fc_lport *lport;
+	struct ft_tpg *tpg;
+
+	ft_sess_delete_all(tport);
+	lport = tport->lport;
+	BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
+	RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
+
+	tpg = tport->tpg;
+	if (tpg) {
+		tpg->tport = NULL;
+		tport->tpg = NULL;
+	}
+	kfree_rcu(tport, rcu);
+}
+
+/*
+ * Add local port.
+ * Called thru fc_lport_iterate().
+ */
+void ft_lport_add(struct fc_lport *lport, void *arg)
+{
+	mutex_lock(&ft_lport_lock);
+	ft_tport_get(lport);
+	mutex_unlock(&ft_lport_lock);
+}
+
+/*
+ * Delete local port.
+ * Called thru fc_lport_iterate().
+ */
+void ft_lport_del(struct fc_lport *lport, void *arg)
+{
+	struct ft_tport *tport;
+
+	mutex_lock(&ft_lport_lock);
+	tport = lport->prov[FC_TYPE_FCP];
+	if (tport)
+		ft_tport_delete(tport);
+	mutex_unlock(&ft_lport_lock);
+}
+
+/*
+ * Notification of local port change from libfc.
+ * Create or delete local port and associated tport.
+ */
+int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
+{
+	struct fc_lport *lport = arg;
+
+	switch (event) {
+	case FC_LPORT_EV_ADD:
+		ft_lport_add(lport, NULL);
+		break;
+	case FC_LPORT_EV_DEL:
+		ft_lport_del(lport, NULL);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+/*
+ * Hash function for FC_IDs.
+ */
+static u32 ft_sess_hash(u32 port_id)
+{
+	return hash_32(port_id, FT_SESS_HASH_BITS);
+}
+
+/*
+ * Find session in local port.
+ * Sessions and hash lists are RCU-protected.
+ * A reference is taken which must eventually be released.
+ */
+static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
+{
+	struct ft_tport *tport;
+	struct hlist_head *head;
+	struct ft_sess *sess;
+
+	rcu_read_lock();
+	tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+	if (!tport)
+		goto out;
+
+	head = &tport->hash[ft_sess_hash(port_id)];
+	hlist_for_each_entry_rcu(sess, head, hash) {
+		if (sess->port_id == port_id) {
+			kref_get(&sess->kref);
+			rcu_read_unlock();
+			pr_debug("port_id %x found %p\n", port_id, sess);
+			return sess;
+		}
+	}
+out:
+	rcu_read_unlock();
+	pr_debug("port_id %x not found\n", port_id);
+	return NULL;
+}
+
+/*
+ * Allocate session and enter it in the hash for the local port.
+ * Caller holds ft_lport_lock.
+ */
+static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
+				      struct ft_node_acl *acl)
+{
+	struct ft_sess *sess;
+	struct hlist_head *head;
+
+	head = &tport->hash[ft_sess_hash(port_id)];
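+	/* Reuse an existing session for this port_id, if one is present. */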
+	hlist_for_each_entry_rcu(sess, head, hash)
+		if (sess->port_id == port_id)
+			return sess;
+
+	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+	if (!sess)
+		return NULL;
+
+	sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
+						    sizeof(struct ft_cmd),
+						    TARGET_PROT_NORMAL);
+	if (IS_ERR(sess->se_sess)) {
+		kfree(sess);
+		return NULL;
+	}
+	sess->se_sess->se_node_acl = &acl->se_node_acl;
+	sess->tport = tport;
+	sess->port_id = port_id;
+	kref_init(&sess->kref);	/* ref for table entry */
+	hlist_add_head_rcu(&sess->hash, head);
+	tport->sess_count++;
+
+	pr_debug("port_id %x sess %p\n", port_id, sess);
+
+	transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
+				   sess->se_sess, sess);
+	return sess;
+}
+
+/*
+ * Unhash the session.
+ * Caller holds ft_lport_lock.
+ */
+static void ft_sess_unhash(struct ft_sess *sess)
+{
+	struct ft_tport *tport = sess->tport;
+
+	hlist_del_rcu(&sess->hash);
+	BUG_ON(!tport->sess_count);
+	tport->sess_count--;
+	sess->port_id = -1;
+	sess->params = 0;
+}
+
+/*
+ * Delete session from hash.
+ * Caller holds ft_lport_lock.
+ */
+static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
+{
+	struct hlist_head *head;
+	struct ft_sess *sess;
+
+	head = &tport->hash[ft_sess_hash(port_id)];
+	hlist_for_each_entry_rcu(sess, head, hash) {
+		if (sess->port_id == port_id) {
+			ft_sess_unhash(sess);
+			return sess;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Delete all sessions from tport.
+ * Caller holds ft_lport_lock.
+ */
+static void ft_sess_delete_all(struct ft_tport *tport)
+{
+	struct hlist_head *head;
+	struct ft_sess *sess;
+
+	for (head = tport->hash;
+	     head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
+		hlist_for_each_entry_rcu(sess, head, hash) {
+			ft_sess_unhash(sess);
+			transport_deregister_session_configfs(sess->se_sess);
+			ft_sess_put(sess);	/* release from table */
+		}
+	}
+}
+
+/*
+ * TCM ops for sessions.
+ */
+
+/*
+ * Determine whether session is allowed to be shutdown in the current context.
+ * Returns non-zero if the session should be shutdown.
+ */
+int ft_sess_shutdown(struct se_session *se_sess)
+{
+	struct ft_sess *sess = se_sess->fabric_sess_ptr;
+
+	pr_debug("port_id %x\n", sess->port_id);
+	return 1;
+}
+
+/*
+ * Remove session and send PRLO.
+ * This is called when the ACL is being deleted or queue depth is changing.
+ */
+void ft_sess_close(struct se_session *se_sess)
+{
+	struct ft_sess *sess = se_sess->fabric_sess_ptr;
+	u32 port_id;
+
+	mutex_lock(&ft_lport_lock);
+	port_id = sess->port_id;
+	if (port_id == -1) {
+		mutex_unlock(&ft_lport_lock);
+		return;
+	}
+	pr_debug("port_id %x\n", port_id);
+	ft_sess_unhash(sess);
+	mutex_unlock(&ft_lport_lock);
+	transport_deregister_session_configfs(se_sess);
+	ft_sess_put(sess);
+	/* XXX Send LOGO or PRLO */
+	synchronize_rcu();		/* let transport deregister happen */
+}
+
+u32 ft_sess_get_index(struct se_session *se_sess)
+{
+	struct ft_sess *sess = se_sess->fabric_sess_ptr;
+
+	return sess->port_id;	/* XXX TBD probably not what is needed */
+}
+
+u32 ft_sess_get_port_name(struct se_session *se_sess,
+			  unsigned char *buf, u32 len)
+{
+	struct ft_sess *sess = se_sess->fabric_sess_ptr;
+
+	return ft_format_wwn(buf, len, sess->port_name);
+}
+
+/*
+ * libfc ops involving sessions.
+ */
+
+static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+			  const struct fc_els_spp *rspp, struct fc_els_spp *spp)
+{
+	struct ft_tport *tport;
+	struct ft_sess *sess;
+	struct ft_node_acl *acl;
+	u32 fcp_parm;
+
+	tport = ft_tport_get(rdata->local_port);
+	if (!tport)
+		goto not_target;	/* not a target for this local port */
+
+	acl = ft_acl_get(tport->tpg, rdata);
+	if (!acl)
+		goto not_target;	/* no target for this remote */
+
+	if (!rspp)
+		goto fill;
+
+	if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
+		return FC_SPP_RESP_NO_PA;
+
+	/*
+	 * If both target and initiator bits are off, the SPP is invalid.
+	 */
+	fcp_parm = ntohl(rspp->spp_params);
+	if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
+		return FC_SPP_RESP_INVL;
+
+	/*
+	 * Create session (image pair) only if requested by
+	 * EST_IMG_PAIR flag and if the requestor is an initiator.
+	 */
+	if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
+		spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
+		if (!(fcp_parm & FCP_SPPF_INIT_FCN))
+			return FC_SPP_RESP_CONF;
+		sess = ft_sess_create(tport, rdata->ids.port_id, acl);
+		if (!sess)
+			return FC_SPP_RESP_RES;
+		if (!sess->params)
+			rdata->prli_count++;
+		sess->params = fcp_parm;
+		sess->port_name = rdata->ids.port_name;
+		sess->max_frame = rdata->maxframe_size;
+
+		/* XXX TBD - clearing actions.  unit attn, see 4.10 */
+	}
+
+	/*
+	 * OR in our service parameters with those of the other provider
+	 * (initiator), if any.
+	 */
+fill:
+	fcp_parm = ntohl(spp->spp_params);
+	fcp_parm &= ~FCP_SPPF_RETRY;
+	spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
+	return FC_SPP_RESP_ACK;
+
+not_target:
+	fcp_parm = ntohl(spp->spp_params);
+	fcp_parm &= ~FCP_SPPF_TARG_FCN;
+	spp->spp_params = htonl(fcp_parm);
+	return 0;
+}
+
+/**
+ * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page (NULL for outgoing PRLI)
+ * @spp: response service parameter page
+ *
+ * Returns spp response code.
+ */
+static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
+		   const struct fc_els_spp *rspp, struct fc_els_spp *spp)
+{
+	int ret;
+
+	mutex_lock(&ft_lport_lock);
+	ret = ft_prli_locked(rdata, spp_len, rspp, spp);
+	mutex_unlock(&ft_lport_lock);
+	pr_debug("port_id %x flags %x ret %x\n",
+	       rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+	return ret;
+}
+
+static void ft_sess_free(struct kref *kref)
+{
+	struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
+
+	transport_deregister_session(sess->se_sess);
+	kfree_rcu(sess, rcu);
+}
+
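+/*
+ * Drop a session reference; the final put deregisters the session and
+ * frees it after an RCU grace period (see ft_sess_free()).
+ */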
+void ft_sess_put(struct ft_sess *sess)
+{
+	int sess_held = atomic_read(&sess->kref.refcount);
+
+	BUG_ON(!sess_held);
+	kref_put(&sess->kref, ft_sess_free);
+}
+
+static void ft_prlo(struct fc_rport_priv *rdata)
+{
+	struct ft_sess *sess;
+	struct ft_tport *tport;
+
+	mutex_lock(&ft_lport_lock);
+	tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
+					  lockdep_is_held(&ft_lport_lock));
+
+	if (!tport) {
+		mutex_unlock(&ft_lport_lock);
+		return;
+	}
+	sess = ft_sess_delete(tport, rdata->ids.port_id);
+	if (!sess) {
+		mutex_unlock(&ft_lport_lock);
+		return;
+	}
+	mutex_unlock(&ft_lport_lock);
+	transport_deregister_session_configfs(sess->se_sess);
+	ft_sess_put(sess);		/* release from table */
+	rdata->prli_count--;
+	/* XXX TBD - clearing actions.  unit attn, see 4.10 */
+}
+
+/*
+ * Handle incoming FCP request.
+ * Caller has verified that the frame is type FCP.
+ */
+static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct ft_sess *sess;
+	u32 sid = fc_frame_sid(fp);
+
+	pr_debug("sid %x\n", sid);
+
+	sess = ft_sess_get(lport, sid);
+	if (!sess) {
+		pr_debug("sid %x sess lookup failed\n", sid);
+		/* TBD XXX - if FCP_CMND, send PRLO */
+		fc_frame_free(fp);
+		return;
+	}
+	ft_recv_req(sess, fp);	/* must do ft_sess_put() */
+}
+
+/*
+ * Provider ops for libfc.
+ */
+struct fc4_prov ft_prov = {
+	.prli = ft_prli,
+	.prlo = ft_prlo,
+	.recv = ft_recv,
+	.module = THIS_MODULE,
+};