File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/security/Kconfig b/security/Kconfig
new file mode 100644
index 0000000..e452378
--- /dev/null
+++ b/security/Kconfig
@@ -0,0 +1,170 @@
+#
+# Security configuration
+#
+
+menu "Security options"
+
+source security/keys/Kconfig
+
+config SECURITY_DMESG_RESTRICT
+	bool "Restrict unprivileged access to the kernel syslog"
+	default n
+	help
+	  This enforces restrictions on unprivileged users reading the kernel
+	  syslog via dmesg(8).
+
+	  If this option is not selected, no restrictions will be enforced
+	  unless the dmesg_restrict sysctl is explicitly set to (1).
+
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY
+	bool "Enable different security models"
+	depends on SYSFS
+	depends on MULTIUSER
+	help
+	  This allows you to choose different security modules to be
+	  configured into your kernel.
+
+	  If this option is not selected, the default Linux security
+	  model will be used.
+
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITYFS
+	bool "Enable the securityfs filesystem"
+	help
+	  This will build the securityfs filesystem.  It is currently used by
+	  the TPM BIOS character driver and IMA, an integrity provider.  It is
+	  not used by SELinux or SMACK.
+
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_NETWORK
+	bool "Socket and Networking Security Hooks"
+	depends on SECURITY
+	help
+	  This enables the socket and networking security hooks.
+	  If enabled, a security module can use these hooks to
+	  implement socket and networking access controls.
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_NETWORK_XFRM
+	bool "XFRM (IPSec) Networking Security Hooks"
+	depends on XFRM && SECURITY_NETWORK
+	help
+	  This enables the XFRM (IPSec) networking security hooks.
+	  If enabled, a security module can use these hooks to
+	  implement per-packet access controls based on labels
+	  derived from IPSec policy.  Non-IPSec communications are
+	  designated as unlabelled, and only sockets authorized
+	  to communicate unlabelled data can send without using
+	  IPSec.
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_PATH
+	bool "Security hooks for pathname based access control"
+	depends on SECURITY
+	help
+	  This enables the security hooks for pathname based access control.
+	  If enabled, a security module can use these hooks to
+	  implement pathname based access controls.
+	  If you are unsure how to answer this question, answer N.
+
+config INTEL_TXT
+	bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)"
+	depends on HAVE_INTEL_TXT
+	help
+	  This option enables support for booting the kernel with the
+	  Trusted Boot (tboot) module. This will utilize
+	  Intel(R) Trusted Execution Technology to perform a measured launch
+	  of the kernel. If the system does not support Intel(R) TXT, this
+	  will have no effect.
+
+	  Intel TXT will provide higher assurance of system configuration and
+	  initial state as well as data reset protection.  This is used to
+	  create a robust initial kernel measurement and verification, which
+	  helps to ensure that kernel security mechanisms are functioning
+	  correctly. This level of protection requires a root of trust outside
+	  of the kernel itself.
+
+	  Intel TXT also helps solve real end user concerns about having
+	  confidence that their hardware is running the VMM or kernel that
+	  it was configured with, especially since they may be responsible for
+	  providing such assurances to VMs and services running on it.
+
+	  See <http://www.intel.com/technology/security/> for more information
+	  about Intel(R) TXT.
+	  See <http://tboot.sourceforge.net> for more information about tboot.
+	  See Documentation/intel_txt.txt for a description of how to enable
+	  Intel TXT support in a kernel boot.
+
+	  If you are unsure as to whether this is required, answer N.
+
+config LSM_MMAP_MIN_ADDR
+	int "Low address space for LSM to protect from user allocation"
+	depends on SECURITY && SECURITY_SELINUX
+	default 32768 if ARM || (ARM64 && COMPAT)
+	default 65536
+	help
+	  This is the portion of low virtual memory which should be protected
+	  from userspace allocation.  Keeping a user from writing to low pages
+	  can help reduce the impact of kernel NULL pointer bugs.
+
+	  For most ia64, ppc64 and x86 users with lots of address space
+	  a value of 65536 is reasonable and should cause no problems.
+	  On arm and other archs it should not be higher than 32768.
+	  Programs which use vm86 functionality or have some need to map
+	  this low address space will need the permission specific to the
+	  LSMs running on the system.
+
+source security/selinux/Kconfig
+source security/smack/Kconfig
+source security/tomoyo/Kconfig
+source security/apparmor/Kconfig
+source security/yama/Kconfig
+
+source security/integrity/Kconfig
+
+choice
+	prompt "Default security module"
+	default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
+	default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
+	default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+	default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+	default DEFAULT_SECURITY_DAC
+
+	help
+	  Select the security module that will be used by default if the
+	  kernel parameter security= is not specified.
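+
+	  For example, booting with security=apparmor selects AppArmor
+	  regardless of the default chosen here.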
+
+	config DEFAULT_SECURITY_SELINUX
+		bool "SELinux" if SECURITY_SELINUX=y
+
+	config DEFAULT_SECURITY_SMACK
+		bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
+
+	config DEFAULT_SECURITY_TOMOYO
+		bool "TOMOYO" if SECURITY_TOMOYO=y
+
+	config DEFAULT_SECURITY_APPARMOR
+		bool "AppArmor" if SECURITY_APPARMOR=y
+
+	config DEFAULT_SECURITY_DAC
+		bool "Unix Discretionary Access Controls"
+
+endchoice
+
+config DEFAULT_SECURITY
+	string
+	default "selinux" if DEFAULT_SECURITY_SELINUX
+	default "smack" if DEFAULT_SECURITY_SMACK
+	default "tomoyo" if DEFAULT_SECURITY_TOMOYO
+	default "apparmor" if DEFAULT_SECURITY_APPARMOR
+	default "" if DEFAULT_SECURITY_DAC
+
+endmenu
+
diff --git a/security/Makefile b/security/Makefile
new file mode 100644
index 0000000..c9bfbc8
--- /dev/null
+++ b/security/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the kernel security code
+#
+
+obj-$(CONFIG_KEYS)			+= keys/
+subdir-$(CONFIG_SECURITY_SELINUX)	+= selinux
+subdir-$(CONFIG_SECURITY_SMACK)		+= smack
+subdir-$(CONFIG_SECURITY_TOMOYO)	+= tomoyo
+subdir-$(CONFIG_SECURITY_APPARMOR)	+= apparmor
+subdir-$(CONFIG_SECURITY_YAMA)		+= yama
+
+# always enable default capabilities
+obj-y					+= commoncap.o
+obj-$(CONFIG_MMU)			+= min_addr.o
+
+# Object file lists
+obj-$(CONFIG_SECURITY)			+= security.o
+obj-$(CONFIG_SECURITYFS)		+= inode.o
+obj-$(CONFIG_SECURITY_SELINUX)		+= selinux/
+obj-$(CONFIG_SECURITY_SMACK)		+= smack/
+obj-$(CONFIG_AUDIT)			+= lsm_audit.o
+obj-$(CONFIG_SECURITY_TOMOYO)		+= tomoyo/
+obj-$(CONFIG_SECURITY_APPARMOR)		+= apparmor/
+obj-$(CONFIG_SECURITY_YAMA)		+= yama/
+obj-$(CONFIG_CGROUP_DEVICE)		+= device_cgroup.o
+
+# Object integrity file lists
+subdir-$(CONFIG_INTEGRITY)		+= integrity
+obj-$(CONFIG_INTEGRITY)			+= integrity/
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
new file mode 100644
index 0000000..9cdec70
--- /dev/null
+++ b/security/apparmor/.gitignore
@@ -0,0 +1,5 @@
+#
+# Generated include files
+#
+capability_names.h
+rlim_names.h
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
new file mode 100644
index 0000000..232469b
--- /dev/null
+++ b/security/apparmor/Kconfig
@@ -0,0 +1,45 @@
+config SECURITY_APPARMOR
+	bool "AppArmor support"
+	depends on SECURITY && NET
+	select AUDIT
+	select SECURITY_PATH
+	select SECURITYFS
+	select SECURITY_NETWORK
+	default n
+	help
+	  This enables the AppArmor security module.
+	  Required userspace tools (if they are not included in your
+	  distribution) and further information may be found at
+	  http://apparmor.wiki.kernel.org
+
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_APPARMOR_BOOTPARAM_VALUE
+	int "AppArmor boot parameter default value"
+	depends on SECURITY_APPARMOR
+	range 0 1
+	default 1
+	help
+	  This option sets the default value for the kernel parameter
+	  'apparmor', which allows AppArmor to be enabled or disabled
+	  at boot.  If this option is set to 0 (zero), the AppArmor
+	  kernel parameter will default to 0, disabling AppArmor at
+	  boot.  If this option is set to 1 (one), the AppArmor
+	  kernel parameter will default to 1, enabling AppArmor at
+	  boot.
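+
+	  For example, booting with apparmor=0 on the kernel command line
+	  overrides this default and disables AppArmor.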
+
+	  If you are unsure how to answer this question, answer 1.
+
+config SECURITY_APPARMOR_HASH
+	bool "SHA1 hash of loaded profiles"
+	depends on SECURITY_APPARMOR
+	select CRYPTO
+	select CRYPTO_SHA1
+	default y
+	help
+	  This option selects whether SHA1 hashing is done against loaded
+	  profiles and exported for inspection to user space via the apparmor
+	  filesystem.
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
new file mode 100644
index 0000000..d693df8
--- /dev/null
+++ b/security/apparmor/Makefile
@@ -0,0 +1,74 @@
+# Makefile for AppArmor Linux Security Module
+#
+obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
+
+apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
+              path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
+              resource.o sid.o file.o
+apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
+
+clean-files := capability_names.h rlim_names.h
+
+
+# Build a lower case string table of capability names
+# Transforms lines from
+#    #define CAP_DAC_OVERRIDE     1
+# to
+#    [1] = "dac_override",
+quiet_cmd_make-caps = GEN     $@
+cmd_make-caps = echo "static const char *const capability_names[] = {" > $@ ;\
+	sed $< >>$@ -r -n -e '/CAP_FS_MASK/d' \
+	-e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/[\2] = "\L\1",/p';\
+	echo "};" >> $@ ;\
+	echo -n '\#define AA_FS_CAPS_MASK "' >> $@ ;\
+	sed $< -r -n -e '/CAP_FS_MASK/d' \
+	    -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/\L\1/p' | \
+	     tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
+
+
+# Build a lower case string table of rlimit names.
+# Transforms lines from
+#    #define RLIMIT_STACK		3	/* max stack size */
+# to
+#    [RLIMIT_STACK] = "stack",
+#
+# and build a second integer table (with the second sed cmd), that maps
+# RLIMIT defines to the order defined in asm-generic/resource.h  This is
+# required by policy load to map policy ordering of RLIMITs to internal
+# ordering for architectures that redefine an RLIMIT.
+# Transforms lines from
+#    #define RLIMIT_STACK		3	/* max stack size */
+# to
+# RLIMIT_STACK, 
+#
+# and build the securityfs entries for the mapping.
+# Transforms lines from
+#    #define RLIMIT_FSIZE        1   /* Maximum filesize */
+#    #define RLIMIT_STACK		3	/* max stack size */
+# to
+# #define AA_FS_RLIMIT_MASK "fsize stack"
+quiet_cmd_make-rlim = GEN     $@
+cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
+	> $@ ;\
+	sed $< >> $@ -r -n \
+	    -e 's/^\# ?define[ \t]+(RLIMIT_([A-Z0-9_]+)).*/[\1] = "\L\2",/p';\
+	echo "};" >> $@ ;\
+	echo "static const int rlim_map[RLIM_NLIMITS] = {" >> $@ ;\
+	sed -r -n "s/^\# ?define[ \t]+(RLIMIT_[A-Z0-9_]+).*/\1,/p" $< >> $@ ;\
+	echo "};" >> $@ ; \
+	echo -n '\#define AA_FS_RLIMIT_MASK "' >> $@ ;\
+	sed -r -n 's/^\# ?define[ \t]+RLIMIT_([A-Z0-9_]+).*/\L\1/p' $< | \
+	    tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
+
+$(obj)/capability.o : $(obj)/capability_names.h
+$(obj)/resource.o : $(obj)/rlim_names.h
+$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
+			    $(src)/Makefile
+	$(call cmd,make-caps)
+$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
+		      $(src)/Makefile
+	$(call cmd,make-rlim)
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
new file mode 100644
index 0000000..9068369
--- /dev/null
+++ b/security/apparmor/apparmorfs.c
@@ -0,0 +1,991 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /sys/kernel/security/apparmor interface functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/ctype.h>
+#include <linux/security.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/namei.h>
+#include <linux/capability.h>
+#include <linux/rcupdate.h>
+
+#include "include/apparmor.h"
+#include "include/apparmorfs.h"
+#include "include/audit.h"
+#include "include/context.h"
+#include "include/crypto.h"
+#include "include/policy.h"
+#include "include/resource.h"
+
+/**
+ * mangle_name - mangle a profile name to std profile layout form
+ * @name: profile name to mangle  (NOT NULL)
+ * @target: buffer to store mangled name, same length as @name (MAYBE NULL)
+ *
+ * Returns: length of mangled name
+ */
+static int mangle_name(char *name, char *target)
+{
+	char *t = target;
+
+	while (*name == '/' || *name == '.')
+		name++;
+
+	if (target) {
+		for (; *name; name++) {
+			if (*name == '/')
+				*(t)++ = '.';
+			else if (isspace(*name))
+				*(t)++ = '_';
+			else if (isalnum(*name) || strchr("._-", *name))
+				*(t)++ = *name;
+		}
+
+		*t = 0;
+	} else {
+		int len = 0;
+		for (; *name; name++) {
+			if (isalnum(*name) || isspace(*name) ||
+			    strchr("/._-", *name))
+				len++;
+		}
+
+		return len;
+	}
+
+	return t - target;
+}
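+
+/*
+ * Illustrative example (added comment, not in the original source):
+ * a profile named "/usr/bin/foo bar" mangles to "usr.bin.foo_bar" --
+ * leading '/' and '.' are stripped, remaining '/' become '.', spaces
+ * become '_', and other non [alnum._-] characters are dropped.
+ */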
+
+/**
+ * aa_simple_write_to_buffer - common routine for getting policy from user
+ * @op: operation doing the user buffer copy
+ * @userbuf: user buffer to copy data from  (NOT NULL)
+ * @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size)
+ * @copy_size: size of data to copy from user buffer
+ * @pos: position write is at in the file (NOT NULL)
+ *
+ * Returns: kernel buffer containing copy of user buffer data or an
+ *          ERR_PTR on failure.
+ */
+static char *aa_simple_write_to_buffer(int op, const char __user *userbuf,
+				       size_t alloc_size, size_t copy_size,
+				       loff_t *pos)
+{
+	char *data;
+
+	BUG_ON(copy_size > alloc_size);
+
+	if (*pos != 0)
+		/* only writes from pos 0, that is complete writes */
+		return ERR_PTR(-ESPIPE);
+
+	/*
+	 * Don't allow profile load/replace/remove from profiles that don't
+	 * have CAP_MAC_ADMIN
+	 */
+	if (!aa_may_manage_policy(op))
+		return ERR_PTR(-EACCES);
+
+	/* freed by the caller of aa_simple_write_to_buffer */
+	data = kvmalloc(alloc_size);
+	if (data == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(data, userbuf, copy_size)) {
+		kvfree(data);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return data;
+}
+
+
+/* .load file hook fn to load policy */
+static ssize_t profile_load(struct file *f, const char __user *buf, size_t size,
+			    loff_t *pos)
+{
+	char *data;
+	ssize_t error;
+
+	data = aa_simple_write_to_buffer(OP_PROF_LOAD, buf, size, size, pos);
+
+	error = PTR_ERR(data);
+	if (!IS_ERR(data)) {
+		error = aa_replace_profiles(data, size, PROF_ADD);
+		kvfree(data);
+	}
+
+	return error;
+}
+
+static const struct file_operations aa_fs_profile_load = {
+	.write = profile_load,
+	.llseek = default_llseek,
+};
+
+/* .replace file hook fn to load and/or replace policy */
+static ssize_t profile_replace(struct file *f, const char __user *buf,
+			       size_t size, loff_t *pos)
+{
+	char *data;
+	ssize_t error;
+
+	data = aa_simple_write_to_buffer(OP_PROF_REPL, buf, size, size, pos);
+	error = PTR_ERR(data);
+	if (!IS_ERR(data)) {
+		error = aa_replace_profiles(data, size, PROF_REPLACE);
+		kvfree(data);
+	}
+
+	return error;
+}
+
+static const struct file_operations aa_fs_profile_replace = {
+	.write = profile_replace,
+	.llseek = default_llseek,
+};
+
+/* .remove file hook fn to remove loaded policy */
+static ssize_t profile_remove(struct file *f, const char __user *buf,
+			      size_t size, loff_t *pos)
+{
+	char *data;
+	ssize_t error;
+
+	/*
+	 * aa_remove_profile needs a null terminated string so 1 extra
+	 * byte is allocated and the copied data is null terminated.
+	 */
+	data = aa_simple_write_to_buffer(OP_PROF_RM, buf, size + 1, size, pos);
+
+	error = PTR_ERR(data);
+	if (!IS_ERR(data)) {
+		data[size] = 0;
+		error = aa_remove_profiles(data, size);
+		kvfree(data);
+	}
+
+	return error;
+}
+
+static const struct file_operations aa_fs_profile_remove = {
+	.write = profile_remove,
+	.llseek = default_llseek,
+};
+
+static int aa_fs_seq_show(struct seq_file *seq, void *v)
+{
+	struct aa_fs_entry *fs_file = seq->private;
+
+	if (!fs_file)
+		return 0;
+
+	switch (fs_file->v_type) {
+	case AA_FS_TYPE_BOOLEAN:
+		seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no");
+		break;
+	case AA_FS_TYPE_STRING:
+		seq_printf(seq, "%s\n", fs_file->v.string);
+		break;
+	case AA_FS_TYPE_U64:
+		seq_printf(seq, "%#08lx\n", fs_file->v.u64);
+		break;
+	default:
+		/* Ignore unprintable entry types. */
+		break;
+	}
+
+	return 0;
+}
+
+static int aa_fs_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, aa_fs_seq_show, inode->i_private);
+}
+
+const struct file_operations aa_fs_seq_file_ops = {
+	.owner		= THIS_MODULE,
+	.open		= aa_fs_seq_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int aa_fs_seq_profile_open(struct inode *inode, struct file *file,
+				  int (*show)(struct seq_file *, void *))
+{
+	struct aa_replacedby *r = aa_get_replacedby(inode->i_private);
+	int error = single_open(file, show, r);
+
+	if (error) {
+		file->private_data = NULL;
+		aa_put_replacedby(r);
+	}
+
+	return error;
+}
+
+static int aa_fs_seq_profile_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = (struct seq_file *) file->private_data;
+	if (seq)
+		aa_put_replacedby(seq->private);
+	return single_release(inode, file);
+}
+
+static int aa_fs_seq_profname_show(struct seq_file *seq, void *v)
+{
+	struct aa_replacedby *r = seq->private;
+	struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+	seq_printf(seq, "%s\n", profile->base.name);
+	aa_put_profile(profile);
+
+	return 0;
+}
+
+static int aa_fs_seq_profname_open(struct inode *inode, struct file *file)
+{
+	return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profname_show);
+}
+
+static const struct file_operations aa_fs_profname_fops = {
+	.owner		= THIS_MODULE,
+	.open		= aa_fs_seq_profname_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_profmode_show(struct seq_file *seq, void *v)
+{
+	struct aa_replacedby *r = seq->private;
+	struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+	seq_printf(seq, "%s\n", aa_profile_mode_names[profile->mode]);
+	aa_put_profile(profile);
+
+	return 0;
+}
+
+static int aa_fs_seq_profmode_open(struct inode *inode, struct file *file)
+{
+	return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profmode_show);
+}
+
+static const struct file_operations aa_fs_profmode_fops = {
+	.owner		= THIS_MODULE,
+	.open		= aa_fs_seq_profmode_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_profattach_show(struct seq_file *seq, void *v)
+{
+	struct aa_replacedby *r = seq->private;
+	struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+	if (profile->attach)
+		seq_printf(seq, "%s\n", profile->attach);
+	else if (profile->xmatch)
+		seq_puts(seq, "<unknown>\n");
+	else
+		seq_printf(seq, "%s\n", profile->base.name);
+	aa_put_profile(profile);
+
+	return 0;
+}
+
+static int aa_fs_seq_profattach_open(struct inode *inode, struct file *file)
+{
+	return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profattach_show);
+}
+
+static const struct file_operations aa_fs_profattach_fops = {
+	.owner		= THIS_MODULE,
+	.open		= aa_fs_seq_profattach_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
+{
+	struct aa_replacedby *r = seq->private;
+	struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+	unsigned int i, size = aa_hash_size();
+
+	if (profile->hash) {
+		for (i = 0; i < size; i++)
+			seq_printf(seq, "%.2x", profile->hash[i]);
+		seq_puts(seq, "\n");
+	}
+	aa_put_profile(profile);
+
+	return 0;
+}
+
+static int aa_fs_seq_hash_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, aa_fs_seq_hash_show, inode->i_private);
+}
+
+static const struct file_operations aa_fs_seq_hash_fops = {
+	.owner		= THIS_MODULE,
+	.open		= aa_fs_seq_hash_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/** fns to set up dynamic per profile/namespace files **/
+void __aa_fs_profile_rmdir(struct aa_profile *profile)
+{
+	struct aa_profile *child;
+	int i;
+
+	if (!profile)
+		return;
+
+	list_for_each_entry(child, &profile->base.profiles, base.list)
+		__aa_fs_profile_rmdir(child);
+
+	for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) {
+		struct aa_replacedby *r;
+		if (!profile->dents[i])
+			continue;
+
+		r = d_inode(profile->dents[i])->i_private;
+		securityfs_remove(profile->dents[i]);
+		aa_put_replacedby(r);
+		profile->dents[i] = NULL;
+	}
+}
+
+void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+				   struct aa_profile *new)
+{
+	int i;
+
+	for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
+		new->dents[i] = old->dents[i];
+		old->dents[i] = NULL;
+	}
+}
+
+static struct dentry *create_profile_file(struct dentry *dir, const char *name,
+					  struct aa_profile *profile,
+					  const struct file_operations *fops)
+{
+	struct aa_replacedby *r = aa_get_replacedby(profile->replacedby);
+	struct dentry *dent;
+
+	dent = securityfs_create_file(name, S_IFREG | 0444, dir, r, fops);
+	if (IS_ERR(dent))
+		aa_put_replacedby(r);
+
+	return dent;
+}
+
+/* requires lock be held */
+int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+{
+	struct aa_profile *child;
+	struct dentry *dent = NULL, *dir;
+	int error;
+
+	if (!parent) {
+		struct aa_profile *p;
+		p = aa_deref_parent(profile);
+		dent = prof_dir(p);
+		/* adding to parent that previously didn't have children */
+		dent = securityfs_create_dir("profiles", dent);
+		if (IS_ERR(dent))
+			goto fail;
+		prof_child_dir(p) = parent = dent;
+	}
+
+	if (!profile->dirname) {
+		int len, id_len;
+		len = mangle_name(profile->base.name, NULL);
+		id_len = snprintf(NULL, 0, ".%ld", profile->ns->uniq_id);
+
+		profile->dirname = kmalloc(len + id_len + 1, GFP_KERNEL);
+		if (!profile->dirname)
+			goto fail;
+
+		mangle_name(profile->base.name, profile->dirname);
+		sprintf(profile->dirname + len, ".%ld", profile->ns->uniq_id++);
+	}
+
+	dent = securityfs_create_dir(profile->dirname, parent);
+	if (IS_ERR(dent))
+		goto fail;
+	prof_dir(profile) = dir = dent;
+
+	dent = create_profile_file(dir, "name", profile, &aa_fs_profname_fops);
+	if (IS_ERR(dent))
+		goto fail;
+	profile->dents[AAFS_PROF_NAME] = dent;
+
+	dent = create_profile_file(dir, "mode", profile, &aa_fs_profmode_fops);
+	if (IS_ERR(dent))
+		goto fail;
+	profile->dents[AAFS_PROF_MODE] = dent;
+
+	dent = create_profile_file(dir, "attach", profile,
+				   &aa_fs_profattach_fops);
+	if (IS_ERR(dent))
+		goto fail;
+	profile->dents[AAFS_PROF_ATTACH] = dent;
+
+	if (profile->hash) {
+		dent = create_profile_file(dir, "sha1", profile,
+					   &aa_fs_seq_hash_fops);
+		if (IS_ERR(dent))
+			goto fail;
+		profile->dents[AAFS_PROF_HASH] = dent;
+	}
+
+	list_for_each_entry(child, &profile->base.profiles, base.list) {
+		error = __aa_fs_profile_mkdir(child, prof_child_dir(profile));
+		if (error)
+			goto fail2;
+	}
+
+	return 0;
+
+fail:
+	error = PTR_ERR(dent);
+
+fail2:
+	__aa_fs_profile_rmdir(profile);
+
+	return error;
+}
+
+void __aa_fs_namespace_rmdir(struct aa_namespace *ns)
+{
+	struct aa_namespace *sub;
+	struct aa_profile *child;
+	int i;
+
+	if (!ns)
+		return;
+
+	list_for_each_entry(child, &ns->base.profiles, base.list)
+		__aa_fs_profile_rmdir(child);
+
+	list_for_each_entry(sub, &ns->sub_ns, base.list) {
+		mutex_lock(&sub->lock);
+		__aa_fs_namespace_rmdir(sub);
+		mutex_unlock(&sub->lock);
+	}
+
+	for (i = AAFS_NS_SIZEOF - 1; i >= 0; --i) {
+		securityfs_remove(ns->dents[i]);
+		ns->dents[i] = NULL;
+	}
+}
+
+int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent,
+			    const char *name)
+{
+	struct aa_namespace *sub;
+	struct aa_profile *child;
+	struct dentry *dent, *dir;
+	int error;
+
+	if (!name)
+		name = ns->base.name;
+
+	dent = securityfs_create_dir(name, parent);
+	if (IS_ERR(dent))
+		goto fail;
+	ns_dir(ns) = dir = dent;
+
+	dent = securityfs_create_dir("profiles", dir);
+	if (IS_ERR(dent))
+		goto fail;
+	ns_subprofs_dir(ns) = dent;
+
+	dent = securityfs_create_dir("namespaces", dir);
+	if (IS_ERR(dent))
+		goto fail;
+	ns_subns_dir(ns) = dent;
+
+	list_for_each_entry(child, &ns->base.profiles, base.list) {
+		error = __aa_fs_profile_mkdir(child, ns_subprofs_dir(ns));
+		if (error)
+			goto fail2;
+	}
+
+	list_for_each_entry(sub, &ns->sub_ns, base.list) {
+		mutex_lock(&sub->lock);
+		error = __aa_fs_namespace_mkdir(sub, ns_subns_dir(ns), NULL);
+		mutex_unlock(&sub->lock);
+		if (error)
+			goto fail2;
+	}
+
+	return 0;
+
+fail:
+	error = PTR_ERR(dent);
+
+fail2:
+	__aa_fs_namespace_rmdir(ns);
+
+	return error;
+}
+
+
+#define list_entry_next(pos, member) \
+	list_entry(pos->member.next, typeof(*pos), member)
+#define list_entry_is_head(pos, head, member) (&pos->member == (head))
+
+/**
+ * __next_namespace - find the next namespace to list
+ * @root: root namespace to stop search at (NOT NULL)
+ * @ns: current ns position (NOT NULL)
+ *
+ * Find the next namespace from @ns under @root and handle all locking needed
+ * while switching current namespace.
+ *
+ * Returns: next namespace or NULL if at last namespace under @root
+ * Requires: ns->parent->lock to be held
+ * NOTE: will not unlock root->lock
+ */
+static struct aa_namespace *__next_namespace(struct aa_namespace *root,
+					     struct aa_namespace *ns)
+{
+	struct aa_namespace *parent, *next;
+
+	/* is next namespace a child */
+	if (!list_empty(&ns->sub_ns)) {
+		next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
+		mutex_lock(&next->lock);
+		return next;
+	}
+
+	/* check if the next ns is a sibling, parent, gp, .. */
+	parent = ns->parent;
+	while (ns != root) {
+		mutex_unlock(&ns->lock);
+		next = list_entry_next(ns, base.list);
+		if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
+			mutex_lock(&next->lock);
+			return next;
+		}
+		ns = parent;
+		parent = parent->parent;
+	}
+
+	return NULL;
+}
+
+/**
+ * __first_profile - find the first profile in a namespace
+ * @root: namespace that is root of profiles being displayed (NOT NULL)
+ * @ns: namespace to start in   (NOT NULL)
+ *
+ * Returns: unrefcounted profile or NULL if no profile
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__first_profile(struct aa_namespace *root,
+					  struct aa_namespace *ns)
+{
+	for (; ns; ns = __next_namespace(root, ns)) {
+		if (!list_empty(&ns->base.profiles))
+			return list_first_entry(&ns->base.profiles,
+						struct aa_profile, base.list);
+	}
+	return NULL;
+}
+
+/**
+ * __next_profile - step to the next profile in a profile tree
+ * @p: current profile in tree (NOT NULL)
+ *
+ * Perform a depth first traversal on the profile tree in a namespace
+ *
+ * Returns: next profile or NULL if done
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__next_profile(struct aa_profile *p)
+{
+	struct aa_profile *parent;
+	struct aa_namespace *ns = p->ns;
+
+	/* is next profile a child */
+	if (!list_empty(&p->base.profiles))
+		return list_first_entry(&p->base.profiles, typeof(*p),
+					base.list);
+
+	/* is next profile a sibling, parent sibling, gp, sibling, .. */
+	parent = rcu_dereference_protected(p->parent,
+					   mutex_is_locked(&p->ns->lock));
+	while (parent) {
+		p = list_entry_next(p, base.list);
+		if (!list_entry_is_head(p, &parent->base.profiles, base.list))
+			return p;
+		p = parent;
+		parent = rcu_dereference_protected(parent->parent,
+					    mutex_is_locked(&parent->ns->lock));
+	}
+
+	/* is next another profile in the namespace */
+	p = list_entry_next(p, base.list);
+	if (!list_entry_is_head(p, &ns->base.profiles, base.list))
+		return p;
+
+	return NULL;
+}
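+
+/*
+ * Illustrative traversal order (added comment, not in the original
+ * source): for a namespace holding profiles A (with children A//x and
+ * A//y) and B, the walk visits A, A//x, A//y, B -- a pre-order depth
+ * first walk of each profile tree, then the next top level profile.
+ */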
+
+/**
+ * next_profile - step to the next profile wherever it may be
+ * @root: root namespace  (NOT NULL)
+ * @profile: current profile  (NOT NULL)
+ *
+ * Returns: next profile or NULL if there isn't one
+ */
+static struct aa_profile *next_profile(struct aa_namespace *root,
+				       struct aa_profile *profile)
+{
+	struct aa_profile *next = __next_profile(profile);
+	if (next)
+		return next;
+
+	/* finished all profiles in namespace, move to next namespace */
+	return __first_profile(root, __next_namespace(root, profile->ns));
+}
+
+/**
+ * p_start - start a depth first traversal of profile tree
+ * @f: seq_file to fill
+ * @pos: current position
+ *
+ * Returns: first profile under current namespace or NULL if none found
+ *
+ * acquires first ns->lock
+ */
+static void *p_start(struct seq_file *f, loff_t *pos)
+{
+	struct aa_profile *profile = NULL;
+	struct aa_namespace *root = aa_current_profile()->ns;
+	loff_t l = *pos;
+	f->private = aa_get_namespace(root);
+
+	/* find the first profile */
+	mutex_lock(&root->lock);
+	profile = __first_profile(root, root);
+
+	/* skip to position */
+	for (; profile && l > 0; l--)
+		profile = next_profile(root, profile);
+
+	return profile;
+}
+
+/**
+ * p_next - read the next profile entry
+ * @f: seq_file to fill
+ * @p: profile previously returned
+ * @pos: current position
+ *
+ * Returns: next profile after @p or NULL if none
+ *
+ * may acquire/release locks in namespace tree as necessary
+ */
+static void *p_next(struct seq_file *f, void *p, loff_t *pos)
+{
+	struct aa_profile *profile = p;
+	struct aa_namespace *ns = f->private;
+	(*pos)++;
+
+	return next_profile(ns, profile);
+}
+
+/**
+ * p_stop - stop depth first traversal
+ * @f: seq_file we are filling
+ * @p: the last profile written
+ *
+ * Release all locking done by p_start/p_next on namespace tree
+ */
+static void p_stop(struct seq_file *f, void *p)
+{
+	struct aa_profile *profile = p;
+	struct aa_namespace *root = f->private, *ns;
+
+	if (profile) {
+		for (ns = profile->ns; ns && ns != root; ns = ns->parent)
+			mutex_unlock(&ns->lock);
+	}
+	mutex_unlock(&root->lock);
+	aa_put_namespace(root);
+}
+
+/**
+ * seq_show_profile - show a profile entry
+ * @f: seq_file to fill
+ * @p: current position (profile)    (NOT NULL)
+ *
+ * Returns: error on failure
+ */
+static int seq_show_profile(struct seq_file *f, void *p)
+{
+	struct aa_profile *profile = (struct aa_profile *)p;
+	struct aa_namespace *root = f->private;
+
+	if (profile->ns != root)
+		seq_printf(f, ":%s://", aa_ns_name(root, profile->ns));
+	seq_printf(f, "%s (%s)\n", profile->base.hname,
+		   aa_profile_mode_names[profile->mode]);
+
+	return 0;
+}
+
+static const struct seq_operations aa_fs_profiles_op = {
+	.start = p_start,
+	.next = p_next,
+	.stop = p_stop,
+	.show = seq_show_profile,
+};
+
+static int profiles_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &aa_fs_profiles_op);
+}
+
+static int profiles_release(struct inode *inode, struct file *file)
+{
+	return seq_release(inode, file);
+}
+
+static const struct file_operations aa_fs_profiles_fops = {
+	.open = profiles_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = profiles_release,
+};
+
+
+/** Base file system setup **/
+static struct aa_fs_entry aa_fs_entry_file[] = {
+	AA_FS_FILE_STRING("mask", "create read write exec append mmap_exec " \
+				  "link lock"),
+	{ }
+};
+
+static struct aa_fs_entry aa_fs_entry_domain[] = {
+	AA_FS_FILE_BOOLEAN("change_hat",	1),
+	AA_FS_FILE_BOOLEAN("change_hatv",	1),
+	AA_FS_FILE_BOOLEAN("change_onexec",	1),
+	AA_FS_FILE_BOOLEAN("change_profile",	1),
+	{ }
+};
+
+static struct aa_fs_entry aa_fs_entry_policy[] = {
+	AA_FS_FILE_BOOLEAN("set_load",          1),
+	{}
+};
+
+static struct aa_fs_entry aa_fs_entry_features[] = {
+	AA_FS_DIR("policy",			aa_fs_entry_policy),
+	AA_FS_DIR("domain",			aa_fs_entry_domain),
+	AA_FS_DIR("file",			aa_fs_entry_file),
+	AA_FS_FILE_U64("capability",		VFS_CAP_FLAGS_MASK),
+	AA_FS_DIR("rlimit",			aa_fs_entry_rlimit),
+	AA_FS_DIR("caps",			aa_fs_entry_caps),
+	{ }
+};
+
+static struct aa_fs_entry aa_fs_entry_apparmor[] = {
+	AA_FS_FILE_FOPS(".load", 0640, &aa_fs_profile_load),
+	AA_FS_FILE_FOPS(".replace", 0640, &aa_fs_profile_replace),
+	AA_FS_FILE_FOPS(".remove", 0640, &aa_fs_profile_remove),
+	AA_FS_FILE_FOPS("profiles", 0640, &aa_fs_profiles_fops),
+	AA_FS_DIR("features", aa_fs_entry_features),
+	{ }
+};
+
+static struct aa_fs_entry aa_fs_entry =
+	AA_FS_DIR("apparmor", aa_fs_entry_apparmor);
+
+/**
+ * aafs_create_file - create a file entry in the apparmor securityfs
+ * @fs_file: aa_fs_entry to build an entry for (NOT NULL)
+ * @parent: the parent dentry in the securityfs
+ *
+ * Use aafs_remove_file to remove entries created with this fn.
+ */
+static int __init aafs_create_file(struct aa_fs_entry *fs_file,
+				   struct dentry *parent)
+{
+	int error = 0;
+
+	fs_file->dentry = securityfs_create_file(fs_file->name,
+						 S_IFREG | fs_file->mode,
+						 parent, fs_file,
+						 fs_file->file_ops);
+	if (IS_ERR(fs_file->dentry)) {
+		error = PTR_ERR(fs_file->dentry);
+		fs_file->dentry = NULL;
+	}
+	return error;
+}
+
+static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir);
+/**
+ * aafs_create_dir - recursively create a directory entry in the securityfs
+ * @fs_dir: aa_fs_entry (and all child entries) to build (NOT NULL)
+ * @parent: the parent dentry in the securityfs
+ *
+ * Use aafs_remove_dir to remove entries created with this fn.
+ */
+static int __init aafs_create_dir(struct aa_fs_entry *fs_dir,
+				  struct dentry *parent)
+{
+	struct aa_fs_entry *fs_file;
+	struct dentry *dir;
+	int error;
+
+	dir = securityfs_create_dir(fs_dir->name, parent);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+	fs_dir->dentry = dir;
+
+	for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
+		if (fs_file->v_type == AA_FS_TYPE_DIR)
+			error = aafs_create_dir(fs_file, fs_dir->dentry);
+		else
+			error = aafs_create_file(fs_file, fs_dir->dentry);
+		if (error)
+			goto failed;
+	}
+
+	return 0;
+
+failed:
+	aafs_remove_dir(fs_dir);
+
+	return error;
+}
+
+/**
+ * aafs_remove_file - drop a single file entry in the apparmor securityfs
+ * @fs_file: aa_fs_entry to detach from the securityfs (NOT NULL)
+ */
+static void __init aafs_remove_file(struct aa_fs_entry *fs_file)
+{
+	if (!fs_file->dentry)
+		return;
+
+	securityfs_remove(fs_file->dentry);
+	fs_file->dentry = NULL;
+}
+
+/**
+ * aafs_remove_dir - recursively drop a directory entry from the securityfs
+ * @fs_dir: aa_fs_entry (and all child entries) to detach (NOT NULL)
+ */
+static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir)
+{
+	struct aa_fs_entry *fs_file;
+
+	for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
+		if (fs_file->v_type == AA_FS_TYPE_DIR)
+			aafs_remove_dir(fs_file);
+		else
+			aafs_remove_file(fs_file);
+	}
+
+	aafs_remove_file(fs_dir);
+}
+
+/**
+ * aa_destroy_aafs - cleanup and free aafs
+ *
+ * releases dentries allocated by aa_create_aafs
+ */
+void __init aa_destroy_aafs(void)
+{
+	aafs_remove_dir(&aa_fs_entry);
+}
+
+/**
+ * aa_create_aafs - create the apparmor security filesystem
+ *
+ * dentries created here are released by aa_destroy_aafs
+ *
+ * Returns: error on failure
+ */
+static int __init aa_create_aafs(void)
+{
+	int error;
+
+	if (!apparmor_initialized)
+		return 0;
+
+	if (aa_fs_entry.dentry) {
+		AA_ERROR("%s: AppArmor securityfs already exists\n", __func__);
+		return -EEXIST;
+	}
+
+	/* Populate fs tree. */
+	error = aafs_create_dir(&aa_fs_entry, NULL);
+	if (error)
+		goto error;
+
+	error = __aa_fs_namespace_mkdir(root_ns, aa_fs_entry.dentry,
+					"policy");
+	if (error)
+		goto error;
+
+	/* TODO: add support for apparmorfs_null and apparmorfs_mnt */
+
+	/* Report that AppArmor fs is enabled */
+	aa_info_message("AppArmor Filesystem Enabled");
+	return 0;
+
+error:
+	aa_destroy_aafs();
+	AA_ERROR("Error creating AppArmor securityfs\n");
+	return error;
+}
+
+fs_initcall(aa_create_aafs);
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
new file mode 100644
index 0000000..89c7865
--- /dev/null
+++ b/security/apparmor/audit.c
@@ -0,0 +1,210 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor auditing functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/audit.h>
+#include <linux/socket.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/policy.h"
+
+const char *const op_table[] = {
+	"null",
+
+	"sysctl",
+	"capable",
+
+	"unlink",
+	"mkdir",
+	"rmdir",
+	"mknod",
+	"truncate",
+	"link",
+	"symlink",
+	"rename_src",
+	"rename_dest",
+	"chmod",
+	"chown",
+	"getattr",
+	"open",
+
+	"file_perm",
+	"file_lock",
+	"file_mmap",
+	"file_mprotect",
+
+	"create",
+	"post_create",
+	"bind",
+	"connect",
+	"listen",
+	"accept",
+	"sendmsg",
+	"recvmsg",
+	"getsockname",
+	"getpeername",
+	"getsockopt",
+	"setsockopt",
+	"socket_shutdown",
+
+	"ptrace",
+
+	"exec",
+	"change_hat",
+	"change_profile",
+	"change_onexec",
+
+	"setprocattr",
+	"setrlimit",
+
+	"profile_replace",
+	"profile_load",
+	"profile_remove"
+};
+
+const char *const audit_mode_names[] = {
+	"normal",
+	"quiet_denied",
+	"quiet",
+	"noquiet",
+	"all"
+};
+
+static const char *const aa_audit_type[] = {
+	"AUDIT",
+	"ALLOWED",
+	"DENIED",
+	"HINT",
+	"STATUS",
+	"ERROR",
+	"KILLED",
+	"AUTO"
+};
+
+/*
+ * Currently AppArmor auditing is fed straight into the audit framework.
+ *
+ * TODO:
+ * netlink interface for complain mode
+ * user auditing - send user auditing to netlink interface
+ * system control of whether user audit messages go to system log
+ */
+
+/**
+ * audit_pre - core AppArmor function.
+ * @ab: audit buffer to fill (NOT NULL)
+ * @ca: audit structure containing data to audit (NOT NULL)
+ *
+ * Record common AppArmor audit data from @ca
+ */
+static void audit_pre(struct audit_buffer *ab, void *ca)
+{
+	struct common_audit_data *sa = ca;
+
+	if (aa_g_audit_header) {
+		audit_log_format(ab, "apparmor=");
+		audit_log_string(ab, aa_audit_type[sa->aad->type]);
+	}
+
+	if (sa->aad->op) {
+		audit_log_format(ab, " operation=");
+		audit_log_string(ab, op_table[sa->aad->op]);
+	}
+
+	if (sa->aad->info) {
+		audit_log_format(ab, " info=");
+		audit_log_string(ab, sa->aad->info);
+		if (sa->aad->error)
+			audit_log_format(ab, " error=%d", sa->aad->error);
+	}
+
+	if (sa->aad->profile) {
+		struct aa_profile *profile = sa->aad->profile;
+		if (profile->ns != root_ns) {
+			audit_log_format(ab, " namespace=");
+			audit_log_untrustedstring(ab, profile->ns->base.hname);
+		}
+		audit_log_format(ab, " profile=");
+		audit_log_untrustedstring(ab, profile->base.hname);
+	}
+
+	if (sa->aad->name) {
+		audit_log_format(ab, " name=");
+		audit_log_untrustedstring(ab, sa->aad->name);
+	}
+}
+
+/**
+ * aa_audit_msg - Log a message to the audit subsystem
+ * @type: audit type for the message
+ * @sa: audit event structure (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ */
+void aa_audit_msg(int type, struct common_audit_data *sa,
+		  void (*cb) (struct audit_buffer *, void *))
+{
+	sa->aad->type = type;
+	common_lsm_audit(sa, audit_pre, cb);
+}
+
+/**
+ * aa_audit - Log a profile based audit event to the audit subsystem
+ * @type: audit type for the message
+ * @profile: profile to check against (NOT NULL)
+ * @gfp: allocation flags to use
+ * @sa: audit event (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ *
+ * Handle default message switching based off of audit mode flags
+ *
+ * Returns: error on failure
+ */
+int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
+	     struct common_audit_data *sa,
+	     void (*cb) (struct audit_buffer *, void *))
+{
+	BUG_ON(!profile);
+
+	if (type == AUDIT_APPARMOR_AUTO) {
+		if (likely(!sa->aad->error)) {
+			if (AUDIT_MODE(profile) != AUDIT_ALL)
+				return 0;
+			type = AUDIT_APPARMOR_AUDIT;
+		} else if (COMPLAIN_MODE(profile))
+			type = AUDIT_APPARMOR_ALLOWED;
+		else
+			type = AUDIT_APPARMOR_DENIED;
+	}
+	if (AUDIT_MODE(profile) == AUDIT_QUIET ||
+	    (type == AUDIT_APPARMOR_DENIED &&
+	     AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
+		return sa->aad->error;
+
+	if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
+		type = AUDIT_APPARMOR_KILL;
+
+	if (!unconfined(profile))
+		sa->aad->profile = profile;
+
+	aa_audit_msg(type, sa, cb);
+
+	if (sa->aad->type == AUDIT_APPARMOR_KILL)
+		(void)send_sig_info(SIGKILL, NULL,
+				    sa->u.tsk ? sa->u.tsk : current);
+
+	if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
+		return complain_error(sa->aad->error);
+
+	return sa->aad->error;
+}
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
new file mode 100644
index 0000000..1101c6f
--- /dev/null
+++ b/security/apparmor/capability.c
@@ -0,0 +1,143 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor capability mediation functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+
+#include "include/apparmor.h"
+#include "include/capability.h"
+#include "include/context.h"
+#include "include/policy.h"
+#include "include/audit.h"
+
+/*
+ * Table of capability names: we generate it from capabilities.h.
+ */
+#include "capability_names.h"
+
+struct aa_fs_entry aa_fs_entry_caps[] = {
+	AA_FS_FILE_STRING("mask", AA_FS_CAPS_MASK),
+	{ }
+};
+
+struct audit_cache {
+	struct aa_profile *profile;
+	kernel_cap_t caps;
+};
+
+static DEFINE_PER_CPU(struct audit_cache, audit_cache);
+
+/**
+ * audit_cb - call back for capability components of audit struct
+ * @ab: audit buffer   (NOT NULL)
+ * @va: audit struct to audit data from  (NOT NULL)
+ */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+	struct common_audit_data *sa = va;
+	audit_log_format(ab, " capname=");
+	audit_log_untrustedstring(ab, capability_names[sa->u.cap]);
+}
+
+/**
+ * audit_caps - audit a capability
+ * @profile: profile being tested for confinement (NOT NULL)
+ * @cap: capability tested
+ * @error: error code returned by test
+ *
+ * Do auditing of capability and handle audit/complain/kill mode switching
+ * and duplicate message elimination.
+ *
+ * Returns: 0 or sa->error on success, error code on failure
+ */
+static int audit_caps(struct aa_profile *profile, int cap, int error)
+{
+	struct audit_cache *ent;
+	int type = AUDIT_APPARMOR_AUTO;
+	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
+	sa.type = LSM_AUDIT_DATA_CAP;
+	sa.aad = &aad;
+	sa.u.cap = cap;
+	sa.aad->op = OP_CAPABLE;
+	sa.aad->error = error;
+
+	if (likely(!error)) {
+		/* test if auditing is being forced */
+		if (likely((AUDIT_MODE(profile) != AUDIT_ALL) &&
+			   !cap_raised(profile->caps.audit, cap)))
+			return 0;
+		type = AUDIT_APPARMOR_AUDIT;
+	} else if (KILL_MODE(profile) ||
+		   cap_raised(profile->caps.kill, cap)) {
+		type = AUDIT_APPARMOR_KILL;
+	} else if (cap_raised(profile->caps.quiet, cap) &&
+		   AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+		   AUDIT_MODE(profile) != AUDIT_ALL) {
+		/* quiet auditing */
+		return error;
+	}
+
+	/* Do simple duplicate message elimination */
+	ent = &get_cpu_var(audit_cache);
+	if (profile == ent->profile && cap_raised(ent->caps, cap)) {
+		put_cpu_var(audit_cache);
+		if (COMPLAIN_MODE(profile))
+			return complain_error(error);
+		return error;
+	} else {
+		aa_put_profile(ent->profile);
+		ent->profile = aa_get_profile(profile);
+		cap_raise(ent->caps, cap);
+	}
+	put_cpu_var(audit_cache);
+
+	return aa_audit(type, profile, GFP_ATOMIC, &sa, audit_cb);
+}
+
+/**
+ * profile_capable - test if profile allows use of capability @cap
+ * @profile: profile being enforced    (NOT NULL, NOT unconfined)
+ * @cap: capability to test if allowed
+ *
+ * Returns: 0 if allowed else -EPERM
+ */
+static int profile_capable(struct aa_profile *profile, int cap)
+{
+	return cap_raised(profile->caps.allow, cap) ? 0 : -EPERM;
+}
+
+/**
+ * aa_capable - test permission to use capability
+ * @profile: profile being tested against (NOT NULL)
+ * @cap: capability to be tested
+ * @audit: whether an audit record should be generated
+ *
+ * Look up capability in profile capability set.
+ *
+ * Returns: 0 on success, or else an error code.
+ */
+int aa_capable(struct aa_profile *profile, int cap, int audit)
+{
+	int error = profile_capable(profile, cap);
+
+	if (!audit) {
+		if (COMPLAIN_MODE(profile))
+			return complain_error(error);
+		return error;
+	}
+
+	return audit_caps(profile, cap, error);
+}
diff --git a/security/apparmor/context.c b/security/apparmor/context.c
new file mode 100644
index 0000000..3064c6c
--- /dev/null
+++ b/security/apparmor/context.c
@@ -0,0 +1,222 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor functions used to manipulate object security
+ * contexts.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ * AppArmor sets confinement on every task, via the aa_task_cxt and
+ * the aa_task_cxt.profile, both of which are required and are not allowed
+ * to be NULL.  The aa_task_cxt is not reference counted and is unique
+ * to each cred (which is reference counted).  The profile pointed to by
+ * the task_cxt is reference counted.
+ *
+ * TODO
+ * If a task uses change_hat it currently does not return to the old
+ * cred or task context but instead creates a new one.  Ideally the task
+ * should return to the previous cred if it has not been modified.
+ *
+ */
+
+#include "include/context.h"
+#include "include/policy.h"
+
+/**
+ * aa_alloc_task_context - allocate a new task_cxt
+ * @flags: gfp flags for allocation
+ *
+ * Returns: allocated buffer or NULL on failure
+ */
+struct aa_task_cxt *aa_alloc_task_context(gfp_t flags)
+{
+	return kzalloc(sizeof(struct aa_task_cxt), flags);
+}
+
+/**
+ * aa_free_task_context - free a task_cxt
+ * @cxt: task_cxt to free (MAYBE NULL)
+ */
+void aa_free_task_context(struct aa_task_cxt *cxt)
+{
+	if (cxt) {
+		aa_put_profile(cxt->profile);
+		aa_put_profile(cxt->previous);
+		aa_put_profile(cxt->onexec);
+
+		kzfree(cxt);
+	}
+}
+
+/**
+ * aa_dup_task_context - duplicate a task context, incrementing reference counts
+ * @new: a blank task context      (NOT NULL)
+ * @old: the task context to copy  (NOT NULL)
+ */
+void aa_dup_task_context(struct aa_task_cxt *new, const struct aa_task_cxt *old)
+{
+	*new = *old;
+	aa_get_profile(new->profile);
+	aa_get_profile(new->previous);
+	aa_get_profile(new->onexec);
+}
+
+/**
+ * aa_get_task_profile - Get another task's profile
+ * @task: task to query  (NOT NULL)
+ *
+ * Returns: counted reference to @task's profile
+ */
+struct aa_profile *aa_get_task_profile(struct task_struct *task)
+{
+	struct aa_profile *p;
+
+	rcu_read_lock();
+	p = aa_get_profile(__aa_task_profile(task));
+	rcu_read_unlock();
+
+	return p;
+}
+
+/**
+ * aa_replace_current_profile - replace the current task's profile
+ * @profile: new profile  (NOT NULL)
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_replace_current_profile(struct aa_profile *profile)
+{
+	struct aa_task_cxt *cxt = current_cxt();
+	struct cred *new;
+	BUG_ON(!profile);
+
+	if (cxt->profile == profile)
+		return 0;
+
+	new  = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	cxt = cred_cxt(new);
+	if (unconfined(profile) || (cxt->profile->ns != profile->ns))
+		/* if switching to unconfined or a different profile namespace
+		 * clear out context state
+		 */
+		aa_clear_task_cxt_trans(cxt);
+
+	/* be careful switching cxt->profile, when racing replacement it
+	 * is possible that cxt->profile->replacedby->profile is the reference
+	 * keeping @profile valid, so make sure to get its reference before
+	 * dropping the reference on cxt->profile */
+	aa_get_profile(profile);
+	aa_put_profile(cxt->profile);
+	cxt->profile = profile;
+
+	commit_creds(new);
+	return 0;
+}
+
+/**
+ * aa_set_current_onexec - set the task's change_profile to happen onexec
+ * @profile: system profile to set at exec  (MAYBE NULL to clear value)
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_set_current_onexec(struct aa_profile *profile)
+{
+	struct aa_task_cxt *cxt;
+	struct cred *new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	cxt = cred_cxt(new);
+	aa_get_profile(profile);
+	aa_put_profile(cxt->onexec);
+	cxt->onexec = profile;
+
+	commit_creds(new);
+	return 0;
+}
+
+/**
+ * aa_set_current_hat - set the current task's hat
+ * @profile: profile to set as the current hat  (NOT NULL)
+ * @token: token value that must be specified to change from the hat
+ *
+ * Switch the task's hat.  If the task is currently in a hat,
+ * validate that the token matches.
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_set_current_hat(struct aa_profile *profile, u64 token)
+{
+	struct aa_task_cxt *cxt;
+	struct cred *new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+	BUG_ON(!profile);
+
+	cxt = cred_cxt(new);
+	if (!cxt->previous) {
+		/* transfer refcount */
+		cxt->previous = cxt->profile;
+		cxt->token = token;
+	} else if (cxt->token == token) {
+		aa_put_profile(cxt->profile);
+	} else {
+		/* previous_profile && cxt->token != token */
+		abort_creds(new);
+		return -EACCES;
+	}
+	cxt->profile = aa_get_newest_profile(profile);
+	/* clear exec on switching context */
+	aa_put_profile(cxt->onexec);
+	cxt->onexec = NULL;
+
+	commit_creds(new);
+	return 0;
+}
+
+/**
+ * aa_restore_previous_profile - exit from hat context restoring the profile
+ * @token: the token that must be matched to exit hat context
+ *
+ * Attempt to return out of a hat to the previous profile.  The token
+ * must match the stored token value.
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_restore_previous_profile(u64 token)
+{
+	struct aa_task_cxt *cxt;
+	struct cred *new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	cxt = cred_cxt(new);
+	if (cxt->token != token) {
+		abort_creds(new);
+		return -EACCES;
+	}
+	/* ignore restores when there is no saved profile */
+	if (!cxt->previous) {
+		abort_creds(new);
+		return 0;
+	}
+
+	aa_put_profile(cxt->profile);
+	cxt->profile = aa_get_newest_profile(cxt->previous);
+	BUG_ON(!cxt->profile);
+	/* clear exec && prev information when restoring to previous context */
+	aa_clear_task_cxt_trans(cxt);
+
+	commit_creds(new);
+	return 0;
+}
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
new file mode 100644
index 0000000..532471d
--- /dev/null
+++ b/security/apparmor/crypto.c
@@ -0,0 +1,101 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy hashing function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Fns to provide a checksum of policy that has been loaded.  This can be
+ * compared to userspace policy compiles to check that loaded policy is
+ * what it should be.
+ */
+
+#include <crypto/hash.h>
+
+#include "include/apparmor.h"
+#include "include/crypto.h"
+
+static unsigned int apparmor_hash_size;
+
+static struct crypto_shash *apparmor_tfm;
+
+unsigned int aa_hash_size(void)
+{
+	return apparmor_hash_size;
+}
+
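+/*
+ * Conceptually (added comment, not in the original source):
+ *   profile->hash = SHA1(cpu_to_le32(version) || start[0..len))
+ * which userspace can recompute over the same policy blob and compare
+ * against the profile's sha1 file in the apparmor securityfs.
+ */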
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+			 size_t len)
+{
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(apparmor_tfm)];
+	} desc;
+	int error = -ENOMEM;
+	__le32 le32_version = cpu_to_le32(version);
+
+	if (!apparmor_tfm)
+		return 0;
+
+	profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
+	if (!profile->hash)
+		goto fail;
+
+	desc.shash.tfm = apparmor_tfm;
+	desc.shash.flags = 0;
+
+	error = crypto_shash_init(&desc.shash);
+	if (error)
+		goto fail;
+	error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4);
+	if (error)
+		goto fail;
+	error = crypto_shash_update(&desc.shash, (u8 *) start, len);
+	if (error)
+		goto fail;
+	error = crypto_shash_final(&desc.shash, profile->hash);
+	if (error)
+		goto fail;
+
+	return 0;
+
+fail:
+	kfree(profile->hash);
+	profile->hash = NULL;
+
+	return error;
+}
+
+static int __init init_profile_hash(void)
+{
+	struct crypto_shash *tfm;
+
+	if (!apparmor_initialized)
+		return 0;
+
+	tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		int error = PTR_ERR(tfm);
+		AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
+		return error;
+	}
+	apparmor_tfm = tfm;
+	apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
+
+	aa_info_message("AppArmor sha1 policy hashing enabled");
+
+	return 0;
+}
+
+late_initcall(init_profile_hash);
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
new file mode 100644
index 0000000..53426a6
--- /dev/null
+++ b/security/apparmor/domain.c
@@ -0,0 +1,859 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy attachment and domain transitions
+ *
+ * Copyright (C) 2002-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/errno.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <linux/personality.h>
+
+#include "include/audit.h"
+#include "include/apparmorfs.h"
+#include "include/context.h"
+#include "include/domain.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+/**
+ * aa_free_domain_entries - free entries in a domain table
+ * @domain: the domain table to free  (MAYBE NULL)
+ */
+void aa_free_domain_entries(struct aa_domain *domain)
+{
+	int i;
+	if (domain) {
+		if (!domain->table)
+			return;
+
+		for (i = 0; i < domain->size; i++)
+			kzfree(domain->table[i]);
+		kzfree(domain->table);
+		domain->table = NULL;
+	}
+}
+
+/**
+ * may_change_ptraced_domain - check if profile can be changed on a ptraced task
+ * @to_profile: profile to change to  (NOT NULL)
+ *
+ * Check if current is ptraced and if so if the tracing task is allowed
+ * to trace the new domain
+ *
+ * Returns: %0 or error if change not allowed
+ */
+static int may_change_ptraced_domain(struct aa_profile *to_profile)
+{
+	struct task_struct *tracer;
+	struct aa_profile *tracerp = NULL;
+	int error = 0;
+
+	rcu_read_lock();
+	tracer = ptrace_parent(current);
+	if (tracer)
+		/* released below */
+		tracerp = aa_get_task_profile(tracer);
+
+	/* not ptraced */
+	if (!tracer || unconfined(tracerp))
+		goto out;
+
+	error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH);
+
+out:
+	rcu_read_unlock();
+	aa_put_profile(tracerp);
+
+	return error;
+}
+
+/**
+ * change_profile_perms - find permissions for change_profile
+ * @profile: the current profile  (NOT NULL)
+ * @ns: the namespace being switched to  (NOT NULL)
+ * @name: the name of the profile to change to  (NOT NULL)
+ * @request: requested perms
+ * @start: state to start matching in
+ *
+ * Returns: permission set
+ */
+static struct file_perms change_profile_perms(struct aa_profile *profile,
+					      struct aa_namespace *ns,
+					      const char *name, u32 request,
+					      unsigned int start)
+{
+	struct file_perms perms;
+	struct path_cond cond = { };
+	unsigned int state;
+
+	if (unconfined(profile)) {
+		perms.allow = AA_MAY_CHANGE_PROFILE | AA_MAY_ONEXEC;
+		perms.audit = perms.quiet = perms.kill = 0;
+		return perms;
+	} else if (!profile->file.dfa) {
+		return nullperms;
+	} else if (ns == profile->ns) {
+		/* try matching against rules without the namespace prepended */
+		aa_str_perms(profile->file.dfa, start, name, &cond, &perms);
+		if (COMBINED_PERM_MASK(perms) & request)
+			return perms;
+	}
+
+	/* try matching with namespace name and then profile */
+	state = aa_dfa_match(profile->file.dfa, start, ns->base.name);
+	state = aa_dfa_match_len(profile->file.dfa, state, ":", 1);
+	aa_str_perms(profile->file.dfa, state, name, &cond, &perms);
+
+	return perms;
+}
+
+/**
+ * __attach_match - find an attachment match
+ * @name: name to match against  (NOT NULL)
+ * @head: profile list to walk  (NOT NULL)
+ *
+ * Do a linear search on the profiles in the list.  There is a matching
+ * preference where an exact match is preferred over a name which uses
+ * expressions to match, and matching expressions with the greatest
+ * xmatch_len are preferred.
+ *
+ * Requires: @head not be shared or have appropriate locks held
+ *
+ * Returns: profile or NULL if no match found
+ */
+static struct aa_profile *__attach_match(const char *name,
+					 struct list_head *head)
+{
+	int len = 0;
+	struct aa_profile *profile, *candidate = NULL;
+
+	list_for_each_entry_rcu(profile, head, base.list) {
+		if (profile->flags & PFLAG_NULL)
+			continue;
+		if (profile->xmatch && profile->xmatch_len > len) {
+			unsigned int state = aa_dfa_match(profile->xmatch,
+							  DFA_START, name);
+			u32 perm = dfa_user_allow(profile->xmatch, state);
+			/* any accepting state means a valid match. */
+			if (perm & MAY_EXEC) {
+				candidate = profile;
+				len = profile->xmatch_len;
+			}
+		} else if (!strcmp(profile->base.name, name))
+			/* exact non-re match, no more searching required */
+			return profile;
+	}
+
+	return candidate;
+}
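+
+/*
+ * Example of the matching preference above (illustrative, hypothetical
+ * profile names): given profiles "/usr/bin/foo" (literal name) and
+ * "/usr/bin/*" (xmatch expression), an exec of "/usr/bin/foo" returns
+ * the literal profile immediately, while an exec of "/usr/bin/bar"
+ * selects "/usr/bin/*" as a candidate; if several expressions accept,
+ * the one with the largest xmatch_len wins.
+ */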
+
+/**
+ * find_attach - do attachment search for unconfined processes
+ * @ns: the current namespace  (NOT NULL)
+ * @list: list to search  (NOT NULL)
+ * @name: the executable name to match against  (NOT NULL)
+ *
+ * Returns: profile or NULL if no match found
+ */
+static struct aa_profile *find_attach(struct aa_namespace *ns,
+				      struct list_head *list, const char *name)
+{
+	struct aa_profile *profile;
+
+	rcu_read_lock();
+	profile = aa_get_profile(__attach_match(name, list));
+	rcu_read_unlock();
+
+	return profile;
+}
+
+/**
+ * separate_fqname - separate the namespace and profile names
+ * @fqname: the fqname name to split  (NOT NULL)
+ * @ns_name: the namespace name if it exists  (NOT NULL)
+ *
+ * This is the xtable equivalent routine of aa_split_fqname.  It finds the
+ * split in an xtable fqname which contains an embedded \0 instead of a :
+ * if a namespace is specified.  This is done so the xtable is constant and
+ * isn't re-split on every lookup.
+ *
+ * Either the profile or namespace name may be optional but if the namespace
+ * is specified the profile name termination must be present.  This results
+ * in the following possible encodings:
+ * profile_name\0
+ * :ns_name\0profile_name\0
+ * :ns_name\0\0
+ *
+ * NOTE: the xtable fqname is pre-validated at load time in unpack_trans_table
+ *
+ * Returns: profile name if it is specified else NULL
+ */
+static const char *separate_fqname(const char *fqname, const char **ns_name)
+{
+	const char *name;
+
+	if (fqname[0] == ':') {
+		/* In this case there are guaranteed to be two \0 terminators
+		 * in the string.  They are verified at load time by
+		 * unpack_trans_table
+		 */
+		*ns_name = fqname + 1;		/* skip : */
+		name = *ns_name + strlen(*ns_name) + 1;
+		if (!*name)
+			name = NULL;
+	} else {
+		*ns_name = NULL;
+		name = fqname;
+	}
+
+	return name;
+}
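+
+/*
+ * Examples of the encodings above (illustrative):
+ *	"profile\0"		-> *ns_name = NULL,  returns "profile"
+ *	":ns\0profile\0"	-> *ns_name = "ns",  returns "profile"
+ *	":ns\0\0"		-> *ns_name = "ns",  returns NULL
+ */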
+
+/* next_name - step to the next name in an xtable entry; entries
+ * currently hold only a single name, so this always returns NULL
+ */
+static const char *next_name(int xtype, const char *name)
+{
+	return NULL;
+}
+
+/**
+ * x_table_lookup - lookup an x transition name via transition table
+ * @profile: current profile (NOT NULL)
+ * @xindex: index into x transition table
+ *
+ * Returns: refcounted profile, or NULL on failure (MAYBE NULL)
+ */
+static struct aa_profile *x_table_lookup(struct aa_profile *profile, u32 xindex)
+{
+	struct aa_profile *new_profile = NULL;
+	struct aa_namespace *ns = profile->ns;
+	u32 xtype = xindex & AA_X_TYPE_MASK;
+	int index = xindex & AA_X_INDEX_MASK;
+	const char *name;
+
+	/* index is guaranteed to be in range, validated at load time */
+	for (name = profile->file.trans.table[index]; !new_profile && name;
+	     name = next_name(xtype, name)) {
+		struct aa_namespace *new_ns;
+		const char *xname = NULL;
+
+		new_ns = NULL;
+		if (xindex & AA_X_CHILD) {
+			/* release by caller */
+			new_profile = aa_find_child(profile, name);
+			continue;
+		} else if (*name == ':') {
+			/* switching namespace */
+			const char *ns_name;
+			xname = name = separate_fqname(name, &ns_name);
+			if (!xname)
+				/* no name so use profile name */
+				xname = profile->base.hname;
+			if (*ns_name == '@') {
+				/* TODO: variable support */
+				;
+			}
+			/* released below */
+			new_ns = aa_find_namespace(ns, ns_name);
+			if (!new_ns)
+				continue;
+		} else if (*name == '@') {
+			/* TODO: variable support */
+			continue;
+		} else {
+			/* basic namespace lookup */
+			xname = name;
+		}
+
+		/* released by caller */
+		new_profile = aa_lookup_profile(new_ns ? new_ns : ns, xname);
+		aa_put_namespace(new_ns);
+	}
+
+	/* released by caller */
+	return new_profile;
+}
+
+/**
+ * x_to_profile - get target profile for a given xindex
+ * @profile: current profile  (NOT NULL)
+ * @name: name to lookup (NOT NULL)
+ * @xindex: index into x transition table
+ *
+ * find profile for a transition index
+ *
+ * Returns: refcounted profile or NULL if not found
+ */
+static struct aa_profile *x_to_profile(struct aa_profile *profile,
+				       const char *name, u32 xindex)
+{
+	struct aa_profile *new_profile = NULL;
+	struct aa_namespace *ns = profile->ns;
+	u32 xtype = xindex & AA_X_TYPE_MASK;
+
+	switch (xtype) {
+	case AA_X_NONE:
+		/* fail exec unless ix || ux fallback - handled by caller */
+		return NULL;
+	case AA_X_NAME:
+		if (xindex & AA_X_CHILD)
+			/* released by caller */
+			new_profile = find_attach(ns, &profile->base.profiles,
+						  name);
+		else
+			/* released by caller */
+			new_profile = find_attach(ns, &ns->base.profiles,
+						  name);
+		break;
+	case AA_X_TABLE:
+		/* released by caller */
+		new_profile = x_table_lookup(profile, xindex);
+		break;
+	}
+
+	/* released by caller */
+	return new_profile;
+}
+
+/**
+ * apparmor_bprm_set_creds - set the new creds on the bprm struct
+ * @bprm: binprm for the exec  (NOT NULL)
+ *
+ * Returns: %0 or error on failure
+ */
+int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+{
+	struct aa_task_cxt *cxt;
+	struct aa_profile *profile, *new_profile = NULL;
+	struct aa_namespace *ns;
+	char *buffer = NULL;
+	unsigned int state;
+	struct file_perms perms = {};
+	struct path_cond cond = {
+		file_inode(bprm->file)->i_uid,
+		file_inode(bprm->file)->i_mode
+	};
+	const char *name = NULL, *target = NULL, *info = NULL;
+	int error = 0;
+
+	if (bprm->cred_prepared)
+		return 0;
+
+	cxt = cred_cxt(bprm->cred);
+	BUG_ON(!cxt);
+
+	profile = aa_get_newest_profile(cxt->profile);
+	/*
+	 * get the namespace from the replacement profile as replacement
+	 * can change the namespace
+	 */
+	ns = profile->ns;
+	state = profile->file.start;
+
+	/* buffer freed below, name is pointer into buffer */
+	error = aa_path_name(&bprm->file->f_path, profile->path_flags, &buffer,
+			     &name, &info);
+	if (error) {
+		if (unconfined(profile) ||
+		    (profile->flags & PFLAG_IX_ON_NAME_ERROR))
+			error = 0;
+		name = bprm->filename;
+		goto audit;
+	}
+
+	/* Test for onexec first as onexec directives override other
+	 * x transitions.
+	 */
+	if (unconfined(profile)) {
+		/* unconfined task */
+		if (cxt->onexec)
+			/* change_profile on exec has already been granted */
+			new_profile = aa_get_profile(cxt->onexec);
+		else
+			new_profile = find_attach(ns, &ns->base.profiles, name);
+		if (!new_profile)
+			goto cleanup;
+		/*
+		 * NOTE: Domain transitions from unconfined are allowed
+		 * even when no_new_privs is set because this always results
+		 * in a further reduction of permissions.
+		 */
+		goto apply;
+	}
+
+	/* find exec permissions for name */
+	state = aa_str_perms(profile->file.dfa, state, name, &cond, &perms);
+	if (cxt->onexec) {
+		struct file_perms cp;
+		info = "change_profile onexec";
+		if (!(perms.allow & AA_MAY_ONEXEC))
+			goto audit;
+
+		/* test if this exec can be paired with change_profile onexec.
+		 * onexec permission is linked to exec with a standard pairing
+		 * exec\0change_profile
+		 */
+		state = aa_dfa_null_transition(profile->file.dfa, state);
+		cp = change_profile_perms(profile, cxt->onexec->ns,
+					  cxt->onexec->base.name,
+					  AA_MAY_ONEXEC, state);
+
+		if (!(cp.allow & AA_MAY_ONEXEC))
+			goto audit;
+		new_profile = aa_get_newest_profile(cxt->onexec);
+		goto apply;
+	}
+
+	if (perms.allow & MAY_EXEC) {
+		/* exec permissions determine how to transition */
+		new_profile = x_to_profile(profile, name, perms.xindex);
+		if (!new_profile) {
+			if (perms.xindex & AA_X_INHERIT) {
+				/* (p|c|n)ix - don't change profile but do
+				 * use the newest version, which was picked
+				 * up above when getting profile
+				 */
+				info = "ix fallback";
+				new_profile = aa_get_profile(profile);
+				goto x_clear;
+			} else if (perms.xindex & AA_X_UNCONFINED) {
+				new_profile = aa_get_newest_profile(ns->unconfined);
+				info = "ux fallback";
+			} else {
+				error = -ENOENT;
+				info = "profile not found";
+				/* remove MAY_EXEC to audit as failure */
+				perms.allow &= ~MAY_EXEC;
+			}
+		}
+	} else if (COMPLAIN_MODE(profile)) {
+		/* no exec permission - are we in learning mode */
+		new_profile = aa_new_null_profile(profile, 0);
+		if (!new_profile) {
+			error = -ENOMEM;
+			info = "could not create null profile";
+		} else {
+			error = -EACCES;
+			target = new_profile->base.hname;
+		}
+		perms.xindex |= AA_X_UNSAFE;
+	} else
+		/* fail exec */
+		error = -EACCES;
+
+	/*
+	 * Policy has specified a domain transition; if no_new_privs is
+	 * set, fail the exec.
+	 */
+	if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) {
+		aa_put_profile(new_profile);
+		error = -EPERM;
+		goto cleanup;
+	}
+
+	if (!new_profile)
+		goto audit;
+
+	if (bprm->unsafe & LSM_UNSAFE_SHARE) {
+		/* FIXME: currently don't mediate shared state */
+		;
+	}
+
+	if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
+		error = may_change_ptraced_domain(new_profile);
+		if (error) {
+			aa_put_profile(new_profile);
+			goto audit;
+		}
+	}
+
+	/* Determine if secure exec is needed.
+	 * Can be at this point for the following reasons:
+	 * 1. unconfined switching to confined
+	 * 2. confined switching to different confinement
+	 * 3. confined switching to unconfined
+	 *
+	 * Cases 2 and 3 are marked as requiring secure exec
+	 * (unless policy specified "unsafe exec")
+	 *
+	 * bprm->unsafe is used to cache the AA_X_UNSAFE permission
+	 * to avoid having to recompute in secureexec
+	 */
+	if (!(perms.xindex & AA_X_UNSAFE)) {
+		AA_DEBUG("scrubbing environment variables for %s profile=%s\n",
+			 name, new_profile->base.hname);
+		bprm->unsafe |= AA_SECURE_X_NEEDED;
+	}
+apply:
+	target = new_profile->base.hname;
+	/* when transitioning profiles clear unsafe personality bits */
+	bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+x_clear:
+	aa_put_profile(cxt->profile);
+	/* transfer the new profile reference; it is released when cxt is freed */
+	cxt->profile = new_profile;
+
+	/* clear out all temporary/transitional state from the context */
+	aa_clear_task_cxt_trans(cxt);
+
+audit:
+	error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC,
+			      name, target, cond.uid, info, error);
+
+cleanup:
+	aa_put_profile(profile);
+	kfree(buffer);
+
+	return error;
+}
+
+/**
+ * apparmor_bprm_secureexec - determine if secureexec is needed
+ * @bprm: binprm for exec  (NOT NULL)
+ *
+ * Returns: %1 if secureexec is needed else %0
+ */
+int apparmor_bprm_secureexec(struct linux_binprm *bprm)
+{
+	/* the decision to use secure exec is computed in set_creds
+	 * and stored in bprm->unsafe.
+	 */
+	if (bprm->unsafe & AA_SECURE_X_NEEDED)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * apparmor_bprm_committing_creds - do task cleanup on committing new creds
+ * @bprm: binprm for the exec  (NOT NULL)
+ */
+void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
+{
+	struct aa_profile *profile = __aa_current_profile();
+	struct aa_task_cxt *new_cxt = cred_cxt(bprm->cred);
+
+	/* bail out if unconfined or not changing profile */
+	if ((new_cxt->profile == profile) ||
+	    (unconfined(new_cxt->profile)))
+		return;
+
+	current->pdeath_signal = 0;
+
+	/* reset soft limits and set hard limits for the new profile */
+	__aa_transition_rlimits(profile, new_cxt->profile);
+}
+
+/**
+ * apparmor_bprm_committed_creds - do cleanup after new creds committed
+ * @bprm: binprm for the exec  (NOT NULL)
+ */
+void apparmor_bprm_committed_creds(struct linux_binprm *bprm)
+{
+	/* TODO: cleanup signals - ipc mediation */
+	return;
+}
+
+/*
+ * Functions for self directed profile change
+ */
+
+/**
+ * new_compound_name - create an hname with @n2 appended to @n1
+ * @n1: base of hname  (NOT NULL)
+ * @n2: name to append (NOT NULL)
+ *
+ * Returns: new name or NULL on error
+ */
+static char *new_compound_name(const char *n1, const char *n2)
+{
+	char *name = kmalloc(strlen(n1) + strlen(n2) + 3, GFP_KERNEL);
+	if (name)
+		sprintf(name, "%s//%s", n1, n2);
+	return name;
+}
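+
+/*
+ * Example (illustrative): new_compound_name("/parent", "hat") returns
+ * "/parent//hat"; the +3 in the allocation covers the "//" separator
+ * and the trailing NUL.
+ */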
+
+/**
+ * aa_change_hat - change hat to/from subprofile
+ * @hats: vector of hat names to try changing into (MAYBE NULL if @count == 0)
+ * @count: number of hat names in @hats
+ * @token: magic value to validate the hat change
+ * @permtest: true if this is just a permission test
+ *
+ * Change to the first profile specified in @hats that exists, and store
+ * the @token in the current task context.  If @count == 0 and the
+ * @token matches that stored in the current task context, return to the
+ * top level profile.
+ *
+ * Returns %0 on success, error otherwise.
+ */
+int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
+{
+	const struct cred *cred;
+	struct aa_task_cxt *cxt;
+	struct aa_profile *profile, *previous_profile, *hat = NULL;
+	char *name = NULL;
+	int i;
+	struct file_perms perms = {};
+	const char *target = NULL, *info = NULL;
+	int error = 0;
+
+	/*
+	 * Fail explicitly requested domain transitions if no_new_privs.
+	 * There is no exception for unconfined as change_hat is not
+	 * available.
+	 */
+	if (task_no_new_privs(current))
+		return -EPERM;
+
+	/* released below */
+	cred = get_current_cred();
+	cxt = cred_cxt(cred);
+	profile = aa_get_newest_profile(aa_cred_profile(cred));
+	previous_profile = aa_get_newest_profile(cxt->previous);
+
+	if (unconfined(profile)) {
+		info = "unconfined";
+		error = -EPERM;
+		goto audit;
+	}
+
+	if (count) {
+		/* attempting to change into a new hat or switch to a sibling */
+		struct aa_profile *root;
+		if (PROFILE_IS_HAT(profile))
+			root = aa_get_profile_rcu(&profile->parent);
+		else
+			root = aa_get_profile(profile);
+
+		/* find first matching hat */
+		for (i = 0; i < count && !hat; i++)
+			/* released below */
+			hat = aa_find_child(root, hats[i]);
+		if (!hat) {
+			if (!COMPLAIN_MODE(root) || permtest) {
+				if (list_empty(&root->base.profiles))
+					error = -ECHILD;
+				else
+					error = -ENOENT;
+				aa_put_profile(root);
+				goto out;
+			}
+
+			/*
+			 * In complain mode, no hats were matched.  The
+			 * failure audit is based on the first hat supplied,
+			 * because of how userspace interacts with
+			 * change_hat.
+			 *
+			 * TODO: Add logging of all failed hats
+			 */
+
+			/* freed below */
+			name = new_compound_name(root->base.hname, hats[0]);
+			aa_put_profile(root);
+			target = name;
+			/* released below */
+			hat = aa_new_null_profile(profile, 1);
+			if (!hat) {
+				info = "failed null profile create";
+				error = -ENOMEM;
+				goto audit;
+			}
+		} else {
+			aa_put_profile(root);
+			target = hat->base.hname;
+			if (!PROFILE_IS_HAT(hat)) {
+				info = "target not hat";
+				error = -EPERM;
+				goto audit;
+			}
+		}
+
+		error = may_change_ptraced_domain(hat);
+		if (error) {
+			info = "ptraced";
+			error = -EPERM;
+			goto audit;
+		}
+
+		if (!permtest) {
+			error = aa_set_current_hat(hat, token);
+			if (error == -EACCES)
+				/* kill task in case of brute force attacks */
+				perms.kill = AA_MAY_CHANGEHAT;
+			else if (name && !error)
+				/* reset error for learning of new hats */
+				error = -ENOENT;
+		}
+	} else if (previous_profile) {
+		/* Return to saved profile.  Kill task if restore fails
+		 * to avoid brute force attacks
+		 */
+		target = previous_profile->base.hname;
+		error = aa_restore_previous_profile(token);
+		perms.kill = AA_MAY_CHANGEHAT;
+	} else
+		/* ignore restores when there is no saved profile */
+		goto out;
+
+audit:
+	if (!permtest)
+		error = aa_audit_file(profile, &perms, GFP_KERNEL,
+				      OP_CHANGE_HAT, AA_MAY_CHANGEHAT, NULL,
+				      target, GLOBAL_ROOT_UID, info, error);
+
+out:
+	aa_put_profile(hat);
+	kfree(name);
+	aa_put_profile(profile);
+	aa_put_profile(previous_profile);
+	put_cred(cred);
+
+	return error;
+}
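+
+/*
+ * Illustrative change_hat flow (hypothetical names): a task confined by
+ * "/parent" calling aa_change_hat with hats = { "h" }, count = 1 and
+ * token = T transitions to the hat "/parent//h" and stores T in its
+ * context; a later call with count = 0 and the same token T returns it
+ * to "/parent".  A mismatched token on restore sets perms.kill so the
+ * task is killed rather than allowing a brute force search of tokens.
+ */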
+
+/**
+ * aa_change_profile - perform a one-way profile transition
+ * @ns_name: name of the profile namespace to change to (MAYBE NULL)
+ * @hname: name of profile to change to (MAYBE NULL)
+ * @onexec: whether this transition is to take place immediately or at exec
+ * @permtest: true if this is just a permission test
+ *
+ * Change to new profile @hname.  Unlike with hats, there is no way
+ * to change back.  If @hname isn't specified the current profile name is
+ * used.
+ * If @onexec then the transition is delayed until the next exec.
+ *
+ * Returns %0 on success, error otherwise.
+ */
+int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
+		      bool permtest)
+{
+	const struct cred *cred;
+	struct aa_profile *profile, *target = NULL;
+	struct aa_namespace *ns = NULL;
+	struct file_perms perms = {};
+	const char *name = NULL, *info = NULL;
+	int op, error = 0;
+	u32 request;
+
+	if (!hname && !ns_name)
+		return -EINVAL;
+
+	if (onexec) {
+		request = AA_MAY_ONEXEC;
+		op = OP_CHANGE_ONEXEC;
+	} else {
+		request = AA_MAY_CHANGE_PROFILE;
+		op = OP_CHANGE_PROFILE;
+	}
+
+	cred = get_current_cred();
+	profile = aa_cred_profile(cred);
+
+	/*
+	 * Fail explicitly requested domain transitions if no_new_privs
+	 * and not unconfined.
+	 * Domain transitions from unconfined are allowed even when
+ * no_new_privs is set because this always results in a reduction
+	 * of permissions.
+	 */
+	if (task_no_new_privs(current) && !unconfined(profile)) {
+		put_cred(cred);
+		return -EPERM;
+	}
+
+	if (ns_name) {
+		/* released below */
+		ns = aa_find_namespace(profile->ns, ns_name);
+		if (!ns) {
+			/* we don't create new namespace in complain mode */
+			name = ns_name;
+			info = "namespace not found";
+			error = -ENOENT;
+			goto audit;
+		}
+	} else
+		/* released below */
+		ns = aa_get_namespace(profile->ns);
+
+	/* if the name was not specified, use the name of the current profile */
+	if (!hname) {
+		if (unconfined(profile))
+			hname = ns->unconfined->base.hname;
+		else
+			hname = profile->base.hname;
+	}
+
+	perms = change_profile_perms(profile, ns, hname, request,
+				     profile->file.start);
+	if (!(perms.allow & request)) {
+		error = -EACCES;
+		goto audit;
+	}
+
+	/* released below */
+	target = aa_lookup_profile(ns, hname);
+	if (!target) {
+		info = "profile not found";
+		error = -ENOENT;
+		if (permtest || !COMPLAIN_MODE(profile))
+			goto audit;
+		/* released below */
+		target = aa_new_null_profile(profile, 0);
+		if (!target) {
+			info = "failed null profile create";
+			error = -ENOMEM;
+			goto audit;
+		}
+	}
+
+	/* check if tracing task is allowed to trace target domain */
+	error = may_change_ptraced_domain(target);
+	if (error) {
+		info = "ptrace prevents transition";
+		goto audit;
+	}
+
+	if (permtest)
+		goto audit;
+
+	if (onexec)
+		error = aa_set_current_onexec(target);
+	else
+		error = aa_replace_current_profile(target);
+
+audit:
+	if (!permtest)
+		error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request,
+				      name, hname, GLOBAL_ROOT_UID, info, error);
+
+	aa_put_namespace(ns);
+	aa_put_profile(target);
+	put_cred(cred);
+
+	return error;
+}
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
new file mode 100644
index 0000000..913f377
--- /dev/null
+++ b/security/apparmor/file.c
@@ -0,0 +1,458 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor mediation of files
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/file.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+struct file_perms nullperms;
+
+
+/**
+ * audit_file_mask - convert mask to permission string
+ * @ab: audit buffer to write the string to  (NOT NULL)
+ * @mask: permission mask to convert
+ */
+static void audit_file_mask(struct audit_buffer *ab, u32 mask)
+{
+	char str[10];
+	char *m = str;
+
+	if (mask & AA_EXEC_MMAP)
+		*m++ = 'm';
+	if (mask & (MAY_READ | AA_MAY_META_READ))
+		*m++ = 'r';
+	if (mask & (MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_CHMOD |
+		    AA_MAY_CHOWN))
+		*m++ = 'w';
+	else if (mask & MAY_APPEND)
+		*m++ = 'a';
+	if (mask & AA_MAY_CREATE)
+		*m++ = 'c';
+	if (mask & AA_MAY_DELETE)
+		*m++ = 'd';
+	if (mask & AA_MAY_LINK)
+		*m++ = 'l';
+	if (mask & AA_MAY_LOCK)
+		*m++ = 'k';
+	if (mask & MAY_EXEC)
+		*m++ = 'x';
+	*m = '\0';
+
+	audit_log_string(ab, str);
+}
+
+/**
+ * file_audit_cb - call back for file specific audit fields
+ * @ab: audit_buffer  (NOT NULL)
+ * @va: audit struct to audit values of  (NOT NULL)
+ */
+static void file_audit_cb(struct audit_buffer *ab, void *va)
+{
+	struct common_audit_data *sa = va;
+	kuid_t fsuid = current_fsuid();
+
+	if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
+		audit_log_format(ab, " requested_mask=");
+		audit_file_mask(ab, sa->aad->fs.request);
+	}
+	if (sa->aad->fs.denied & AA_AUDIT_FILE_MASK) {
+		audit_log_format(ab, " denied_mask=");
+		audit_file_mask(ab, sa->aad->fs.denied);
+	}
+	if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
+		audit_log_format(ab, " fsuid=%d",
+				 from_kuid(&init_user_ns, fsuid));
+		audit_log_format(ab, " ouid=%d",
+				 from_kuid(&init_user_ns, sa->aad->fs.ouid));
+	}
+
+	if (sa->aad->fs.target) {
+		audit_log_format(ab, " target=");
+		audit_log_untrustedstring(ab, sa->aad->fs.target);
+	}
+}
+
+/**
+ * aa_audit_file - handle the auditing of file operations
+ * @profile: the profile being enforced  (NOT NULL)
+ * @perms: the permissions computed for the request (NOT NULL)
+ * @gfp: allocation flags
+ * @op: operation being mediated
+ * @request: permissions requested
+ * @name: name of object being mediated (MAYBE NULL)
+ * @target: name of target (MAYBE NULL)
+ * @ouid: object uid
+ * @info: extra information message (MAYBE NULL)
+ * @error: 0 if operation allowed else failure error code
+ *
+ * Returns: %0 or error on failure
+ */
+int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
+		  gfp_t gfp, int op, u32 request, const char *name,
+		  const char *target, kuid_t ouid, const char *info, int error)
+{
+	int type = AUDIT_APPARMOR_AUTO;
+	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
+	sa.type = LSM_AUDIT_DATA_NONE;
+	sa.aad = &aad;
+	aad.op = op;
+	aad.fs.request = request;
+	aad.name = name;
+	aad.fs.target = target;
+	aad.fs.ouid = ouid;
+	aad.info = info;
+	aad.error = error;
+
+	if (likely(!sa.aad->error)) {
+		u32 mask = perms->audit;
+
+		if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
+			mask = 0xffff;
+
+		/* mask off perms that are not being force audited */
+		sa.aad->fs.request &= mask;
+
+		if (likely(!sa.aad->fs.request))
+			return 0;
+		type = AUDIT_APPARMOR_AUDIT;
+	} else {
+		/* only report permissions that were denied */
+		sa.aad->fs.request = sa.aad->fs.request & ~perms->allow;
+
+		if (sa.aad->fs.request & perms->kill)
+			type = AUDIT_APPARMOR_KILL;
+
+		/* quiet known rejects, assumes quiet and kill do not overlap */
+		if ((sa.aad->fs.request & perms->quiet) &&
+		    AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+		    AUDIT_MODE(profile) != AUDIT_ALL)
+			sa.aad->fs.request &= ~perms->quiet;
+
+		if (!sa.aad->fs.request)
+			return COMPLAIN_MODE(profile) ? 0 : sa.aad->error;
+	}
+
+	sa.aad->fs.denied = sa.aad->fs.request & ~perms->allow;
+	return aa_audit(type, profile, gfp, &sa, file_audit_cb);
+}
+
+/**
+ * map_old_perms - map old file perms layout to the new layout
+ * @old: permission set in old mapping
+ *
+ * Returns: new permission mapping
+ */
+static u32 map_old_perms(u32 old)
+{
+	u32 new = old & 0xf;
+	if (old & MAY_READ)
+		new |= AA_MAY_META_READ;
+	if (old & MAY_WRITE)
+		new |= AA_MAY_META_WRITE | AA_MAY_CREATE | AA_MAY_DELETE |
+			AA_MAY_CHMOD | AA_MAY_CHOWN;
+	if (old & 0x10)
+		new |= AA_MAY_LINK;
+	/* the old mapping's lock and link_subset flags were overlaid;
+	 * which one applied was determined by which member of a rule
+	 * pair they appeared in
+	 */
+	if (old & 0x20)
+		new |= AA_MAY_LOCK | AA_LINK_SUBSET;
+	if (old & 0x40)	/* AA_EXEC_MMAP */
+		new |= AA_EXEC_MMAP;
+
+	return new;
+}
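+
+/*
+ * Worked example of the mapping above (illustrative): an old encoding
+ * of MAY_READ | MAY_WRITE (0x6) maps to
+ *	(MAY_READ | MAY_WRITE)			low nibble kept
+ *	| AA_MAY_META_READ			from MAY_READ
+ *	| AA_MAY_META_WRITE | AA_MAY_CREATE | AA_MAY_DELETE
+ *	| AA_MAY_CHMOD | AA_MAY_CHOWN		from MAY_WRITE
+ * i.e. 0x03f6 in the new layout.
+ */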
+
+/**
+ * compute_perms - convert dfa compressed perms to internal perms
+ * @dfa: dfa to compute perms for   (NOT NULL)
+ * @state: state in dfa
+ * @cond:  conditions to consider  (NOT NULL)
+ *
+ * TODO: convert from dfa + state to permission entry, do computation conversion
+ *       at load time.
+ *
+ * Returns: computed permission set
+ */
+static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
+				       struct path_cond *cond)
+{
+	struct file_perms perms;
+
+	/* FIXME: change over to new dfa format
+	 * currently file perms are encoded in the dfa, new format
+	 * splits the permissions from the dfa.  This mapping can be
+	 * done at profile load
+	 */
+	perms.kill = 0;
+
+	if (uid_eq(current_fsuid(), cond->uid)) {
+		perms.allow = map_old_perms(dfa_user_allow(dfa, state));
+		perms.audit = map_old_perms(dfa_user_audit(dfa, state));
+		perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
+		perms.xindex = dfa_user_xindex(dfa, state);
+	} else {
+		perms.allow = map_old_perms(dfa_other_allow(dfa, state));
+		perms.audit = map_old_perms(dfa_other_audit(dfa, state));
+		perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
+		perms.xindex = dfa_other_xindex(dfa, state);
+	}
+	perms.allow |= AA_MAY_META_READ;
+
+	/* change_profile wasn't determined by ownership in old mapping */
+	if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
+		perms.allow |= AA_MAY_CHANGE_PROFILE;
+	if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
+		perms.allow |= AA_MAY_ONEXEC;
+
+	return perms;
+}
+
+/**
+ * aa_str_perms - find permissions that match @name
+ * @dfa: to match against  (MAYBE NULL)
+ * @start: state to start matching in
+ * @name: string to match against dfa  (NOT NULL)
+ * @cond: conditions to consider for permission set computation  (NOT NULL)
+ * @perms: Returns - the permissions found when matching @name
+ *
+ * Returns: the final state in @dfa when beginning at @start and walking @name
+ */
+unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
+			  const char *name, struct path_cond *cond,
+			  struct file_perms *perms)
+{
+	unsigned int state;
+	if (!dfa) {
+		*perms = nullperms;
+		return DFA_NOMATCH;
+	}
+
+	state = aa_dfa_match(dfa, start, name);
+	*perms = compute_perms(dfa, state, cond);
+
+	return state;
+}
+
+/**
+ * is_deleted - test if a file has been completely unlinked
+ * @dentry: dentry of file to test for deletion  (NOT NULL)
+ *
+ * Returns: %1 if deleted else %0
+ */
+static inline bool is_deleted(struct dentry *dentry)
+{
+	if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
+		return 1;
+	return 0;
+}
+
+/**
+ * aa_path_perm - do permissions check & audit for @path
+ * @op: operation being checked
+ * @profile: profile being enforced  (NOT NULL)
+ * @path: path to check permissions of  (NOT NULL)
+ * @flags: any additional path flags beyond what the profile specifies
+ * @request: requested permissions
+ * @cond: conditional info for this request  (NOT NULL)
+ *
+ * Returns: %0 else error if access denied or other error
+ */
+int aa_path_perm(int op, struct aa_profile *profile, struct path *path,
+		 int flags, u32 request, struct path_cond *cond)
+{
+	char *buffer = NULL;
+	struct file_perms perms = {};
+	const char *name, *info = NULL;
+	int error;
+
+	flags |= profile->path_flags | (S_ISDIR(cond->mode) ? PATH_IS_DIR : 0);
+	error = aa_path_name(path, flags, &buffer, &name, &info);
+	if (error) {
+		if (error == -ENOENT && is_deleted(path->dentry)) {
+			/* Access to open files that have been deleted is
+			 * given a pass (implicit delegation)
+			 */
+			error = 0;
+			info = NULL;
+			perms.allow = request;
+		}
+	} else {
+		aa_str_perms(profile->file.dfa, profile->file.start, name, cond,
+			     &perms);
+		if (request & ~perms.allow)
+			error = -EACCES;
+	}
+	error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request, name,
+			      NULL, cond->uid, info, error);
+	kfree(buffer);
+
+	return error;
+}
+
+/**
+ * xindex_is_subset - helper for aa_path_link
+ * @link: link permission set
+ * @target: target permission set
+ *
+ * Test whether the target x permissions are equal to, or a subset of, the
+ * link x permissions.  This is done as part of the subset test, where a
+ * hardlink must have a subset of the permissions that the target has.
+ *
+ * Returns: %1 if subset else %0
+ */
+static inline bool xindex_is_subset(u32 link, u32 target)
+{
+	if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) ||
+	    ((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE)))
+		return 0;
+
+	return 1;
+}
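+
+/*
+ * Examples (illustrative): equal xindex values are trivially a subset;
+ * a link xindex carrying AA_X_UNSAFE paired with a target xindex that
+ * lacks AA_X_UNSAFE is rejected, as is any difference in the remaining
+ * type/index bits.
+ */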
+
+/**
+ * aa_path_link - Handle hard link permission check
+ * @profile: the profile being enforced  (NOT NULL)
+ * @old_dentry: the target dentry  (NOT NULL)
+ * @new_dir: directory the new link will be created in  (NOT NULL)
+ * @new_dentry: the link being created  (NOT NULL)
+ *
+ * Handle the permission test for a link & target pair.  Permission
+ * is encoded as a pair where the link permission is determined
+ * first, and if allowed, the target is tested.  The target test
+ * is done from the point of the link match (not start of DFA)
+ * making the target permission dependent on the link permission match.
+ *
+ * The subset test, if required, forces the permissions granted
+ * on the link to be a subset of the permissions granted to the target.
+ *
+ * Returns: %0 if allowed else error
+ */
+int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
+		 struct path *new_dir, struct dentry *new_dentry)
+{
+	struct path link = { new_dir->mnt, new_dentry };
+	struct path target = { new_dir->mnt, old_dentry };
+	struct path_cond cond = {
+		d_backing_inode(old_dentry)->i_uid,
+		d_backing_inode(old_dentry)->i_mode
+	};
+	char *buffer = NULL, *buffer2 = NULL;
+	const char *lname, *tname = NULL, *info = NULL;
+	struct file_perms lperms, perms;
+	u32 request = AA_MAY_LINK;
+	unsigned int state;
+	int error;
+
+	lperms = nullperms;
+
+	/* buffer freed below, lname is pointer in buffer */
+	error = aa_path_name(&link, profile->path_flags, &buffer, &lname,
+			     &info);
+	if (error)
+		goto audit;
+
+	/* buffer2 freed below, tname is pointer in buffer2 */
+	error = aa_path_name(&target, profile->path_flags, &buffer2, &tname,
+			     &info);
+	if (error)
+		goto audit;
+
+	error = -EACCES;
+	/* aa_str_perms - handles the case of the dfa being NULL */
+	state = aa_str_perms(profile->file.dfa, profile->file.start, lname,
+			     &cond, &lperms);
+
+	if (!(lperms.allow & AA_MAY_LINK))
+		goto audit;
+
+	/* test to see if target can be paired with link */
+	state = aa_dfa_null_transition(profile->file.dfa, state);
+	aa_str_perms(profile->file.dfa, state, tname, &cond, &perms);
+
+	/* The force audit/quiet masks for the link are stored in the
+	 * second entry of the link pair.
+	 */
+	lperms.audit = perms.audit;
+	lperms.quiet = perms.quiet;
+	lperms.kill = perms.kill;
+
+	if (!(perms.allow & AA_MAY_LINK)) {
+		info = "target restricted";
+		goto audit;
+	}
+
+	/* done if link subset test is not required */
+	if (!(perms.allow & AA_LINK_SUBSET))
+		goto done_tests;
+
+	/* Do the link perm subset test, requiring that the permissions
+	 * allowed on the link are a subset of those allowed on the target.
+	 */
+	aa_str_perms(profile->file.dfa, profile->file.start, tname, &cond,
+		     &perms);
+
+	/* AA_MAY_LINK is not considered in the subset test */
+	request = lperms.allow & ~AA_MAY_LINK;
+	lperms.allow &= perms.allow | AA_MAY_LINK;
+
+	request |= AA_AUDIT_FILE_MASK & (lperms.allow & ~perms.allow);
+	if (request & ~lperms.allow) {
+		goto audit;
+	} else if ((lperms.allow & MAY_EXEC) &&
+		   !xindex_is_subset(lperms.xindex, perms.xindex)) {
+		lperms.allow &= ~MAY_EXEC;
+		request |= MAY_EXEC;
+		info = "link not subset of target";
+		goto audit;
+	}
+
+done_tests:
+	error = 0;
+
+audit:
+	error = aa_audit_file(profile, &lperms, GFP_KERNEL, OP_LINK, request,
+			      lname, tname, cond.uid, info, error);
+	kfree(buffer);
+	kfree(buffer2);
+
+	return error;
+}
+
+/**
+ * aa_file_perm - do permission revalidation check & audit for @file
+ * @op: operation being checked
+ * @profile: profile being enforced   (NOT NULL)
+ * @file: file to revalidate access permissions on  (NOT NULL)
+ * @request: requested permissions
+ *
+ * Returns: %0 if access allowed else error
+ */
+int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
+		 u32 request)
+{
+	struct path_cond cond = {
+		.uid = file_inode(file)->i_uid,
+		.mode = file_inode(file)->i_mode
+	};
+
+	return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED,
+			    request, &cond);
+}
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
new file mode 100644
index 0000000..e4ea626
--- /dev/null
+++ b/security/apparmor/include/apparmor.h
@@ -0,0 +1,120 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor basic global and lib definitions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __APPARMOR_H
+#define __APPARMOR_H
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+
+#include "match.h"
+
+/*
+ * Class of mediation types in the AppArmor policy db
+ */
+#define AA_CLASS_ENTRY		0
+#define AA_CLASS_UNKNOWN	1
+#define AA_CLASS_FILE		2
+#define AA_CLASS_CAP		3
+#define AA_CLASS_NET		4
+#define AA_CLASS_RLIMITS	5
+#define AA_CLASS_DOMAIN		6
+
+#define AA_CLASS_LAST		AA_CLASS_DOMAIN
+
+/* Control parameters settable through module/boot flags */
+extern enum audit_mode aa_g_audit;
+extern bool aa_g_audit_header;
+extern bool aa_g_debug;
+extern bool aa_g_lock_policy;
+extern bool aa_g_logsyscall;
+extern bool aa_g_paranoid_load;
+extern unsigned int aa_g_path_max;
+
+/*
+ * DEBUG remains global (no per profile flag) since it is mostly used in sysctl
+ * which is not related to profile accesses.
+ */
+
+#define AA_DEBUG(fmt, args...)						\
+	do {								\
+		if (aa_g_debug && printk_ratelimit())			\
+			printk(KERN_DEBUG "AppArmor: " fmt, ##args);	\
+	} while (0)
+
+#define AA_ERROR(fmt, args...)						\
+	do {								\
+		if (printk_ratelimit())					\
+			printk(KERN_ERR "AppArmor: " fmt, ##args);	\
+	} while (0)
+
+/* Flag indicating whether initialization completed */
+extern int apparmor_initialized __initdata;
+
+/* fn's in lib */
+char *aa_split_fqname(char *args, char **ns_name);
+void aa_info_message(const char *str);
+void *__aa_kvmalloc(size_t size, gfp_t flags);
+
+static inline void *kvmalloc(size_t size)
+{
+	return __aa_kvmalloc(size, 0);
+}
+
+static inline void *kvzalloc(size_t size)
+{
+	return __aa_kvmalloc(size, __GFP_ZERO);
+}
+
+/* returns 0 if kref not incremented */
+static inline int kref_get_not0(struct kref *kref)
+{
+	return atomic_inc_not_zero(&kref->refcount);
+}
+
+/**
+ * aa_strneq - compare null terminated @str to a non null terminated substring
+ * @str: a null terminated string
+ * @sub: a substring, not necessarily null terminated
+ * @len: length of @sub to compare
+ *
+ * The @str string must be fully consumed for this to be considered a match
+ */
+static inline bool aa_strneq(const char *str, const char *sub, int len)
+{
+	return !strncmp(str, sub, len) && !str[len];
+}
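+
+/*
+ * Example (illustrative): aa_strneq("py", "python", 2) is true, since
+ * the first 2 bytes match and "py" is fully consumed, while
+ * aa_strneq("python", "py", 2) is false because str[2] != '\0'.
+ */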
+
+/**
+ * aa_dfa_null_transition - step to next state after null character
+ * @dfa: the dfa to match against
+ * @start: the state of the dfa to start matching in
+ *
+ * aa_dfa_null_transition transitions to the next state after a null
+ * character which is not used in standard matching and is only
+ * used to separate pairs.
+ */
+static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
+						  unsigned int start)
+{
+	/* the null transition only needs the string's null terminator byte */
+	return aa_dfa_next(dfa, start, 0);
+}
+
+static inline bool mediated_filesystem(struct dentry *dentry)
+{
+	return !(dentry->d_sb->s_flags & MS_NOUSER);
+}
+
+#endif /* __APPARMOR_H */
diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h
new file mode 100644
index 0000000..414e568
--- /dev/null
+++ b/security/apparmor/include/apparmorfs.h
@@ -0,0 +1,104 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor filesystem definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_APPARMORFS_H
+#define __AA_APPARMORFS_H
+
+enum aa_fs_type {
+	AA_FS_TYPE_BOOLEAN,
+	AA_FS_TYPE_STRING,
+	AA_FS_TYPE_U64,
+	AA_FS_TYPE_FOPS,
+	AA_FS_TYPE_DIR,
+};
+
+struct aa_fs_entry;
+
+struct aa_fs_entry {
+	const char *name;
+	struct dentry *dentry;
+	umode_t mode;
+	enum aa_fs_type v_type;
+	union {
+		bool boolean;
+		char *string;
+		unsigned long u64;
+		struct aa_fs_entry *files;
+	} v;
+	const struct file_operations *file_ops;
+};
+
+extern const struct file_operations aa_fs_seq_file_ops;
+
+#define AA_FS_FILE_BOOLEAN(_name, _value) \
+	{ .name = (_name), .mode = 0444, \
+	  .v_type = AA_FS_TYPE_BOOLEAN, .v.boolean = (_value), \
+	  .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_STRING(_name, _value) \
+	{ .name = (_name), .mode = 0444, \
+	  .v_type = AA_FS_TYPE_STRING, .v.string = (_value), \
+	  .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_U64(_name, _value) \
+	{ .name = (_name), .mode = 0444, \
+	  .v_type = AA_FS_TYPE_U64, .v.u64 = (_value), \
+	  .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_FOPS(_name, _mode, _fops) \
+	{ .name = (_name), .v_type = AA_FS_TYPE_FOPS, \
+	  .mode = (_mode), .file_ops = (_fops) }
+#define AA_FS_DIR(_name, _value) \
+	{ .name = (_name), .v_type = AA_FS_TYPE_DIR, .v.files = (_value) }
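+
+/*
+ * Illustrative use of the initializers above (hypothetical table):
+ *
+ *	static struct aa_fs_entry aa_fs_entry_example[] = {
+ *		AA_FS_FILE_BOOLEAN("change_hat", 1),
+ *		AA_FS_FILE_STRING("version", "2.8"),
+ *		{ }
+ *	};
+ *
+ * Each entry expands to a designated initializer wired to
+ * aa_fs_seq_file_ops, and the empty sentinel terminates the table.
+ */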
+
+extern void __init aa_destroy_aafs(void);
+
+struct aa_profile;
+struct aa_namespace;
+
+enum aafs_ns_type {
+	AAFS_NS_DIR,
+	AAFS_NS_PROFS,
+	AAFS_NS_NS,
+	AAFS_NS_COUNT,
+	AAFS_NS_MAX_COUNT,
+	AAFS_NS_SIZE,
+	AAFS_NS_MAX_SIZE,
+	AAFS_NS_OWNER,
+	AAFS_NS_SIZEOF,
+};
+
+enum aafs_prof_type {
+	AAFS_PROF_DIR,
+	AAFS_PROF_PROFS,
+	AAFS_PROF_NAME,
+	AAFS_PROF_MODE,
+	AAFS_PROF_ATTACH,
+	AAFS_PROF_HASH,
+	AAFS_PROF_SIZEOF,
+};
+
+#define ns_dir(X) ((X)->dents[AAFS_NS_DIR])
+#define ns_subns_dir(X) ((X)->dents[AAFS_NS_NS])
+#define ns_subprofs_dir(X) ((X)->dents[AAFS_NS_PROFS])
+
+#define prof_dir(X) ((X)->dents[AAFS_PROF_DIR])
+#define prof_child_dir(X) ((X)->dents[AAFS_PROF_PROFS])
+
+void __aa_fs_profile_rmdir(struct aa_profile *profile);
+void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+				   struct aa_profile *new);
+int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent);
+void __aa_fs_namespace_rmdir(struct aa_namespace *ns);
+int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent,
+			    const char *name);
+
+#endif /* __AA_APPARMORFS_H */
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
new file mode 100644
index 0000000..ba3dfd1
--- /dev/null
+++ b/security/apparmor/include/audit.h
@@ -0,0 +1,147 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor auditing function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_AUDIT_H
+#define __AA_AUDIT_H
+
+#include <linux/audit.h>
+#include <linux/fs.h>
+#include <linux/lsm_audit.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "file.h"
+
+struct aa_profile;
+
+extern const char *const audit_mode_names[];
+#define AUDIT_MAX_INDEX 5
+enum audit_mode {
+	AUDIT_NORMAL,		/* follow normal auditing of accesses */
+	AUDIT_QUIET_DENIED,	/* quiet all denied access messages */
+	AUDIT_QUIET,		/* quiet all messages */
+	AUDIT_NOQUIET,		/* do not quiet audit messages */
+	AUDIT_ALL		/* audit all accesses */
+};
+
+enum audit_type {
+	AUDIT_APPARMOR_AUDIT,
+	AUDIT_APPARMOR_ALLOWED,
+	AUDIT_APPARMOR_DENIED,
+	AUDIT_APPARMOR_HINT,
+	AUDIT_APPARMOR_STATUS,
+	AUDIT_APPARMOR_ERROR,
+	AUDIT_APPARMOR_KILL,
+	AUDIT_APPARMOR_AUTO
+};
+
+extern const char *const op_table[];
+enum aa_ops {
+	OP_NULL,
+
+	OP_SYSCTL,
+	OP_CAPABLE,
+
+	OP_UNLINK,
+	OP_MKDIR,
+	OP_RMDIR,
+	OP_MKNOD,
+	OP_TRUNC,
+	OP_LINK,
+	OP_SYMLINK,
+	OP_RENAME_SRC,
+	OP_RENAME_DEST,
+	OP_CHMOD,
+	OP_CHOWN,
+	OP_GETATTR,
+	OP_OPEN,
+
+	OP_FPERM,
+	OP_FLOCK,
+	OP_FMMAP,
+	OP_FMPROT,
+
+	OP_CREATE,
+	OP_POST_CREATE,
+	OP_BIND,
+	OP_CONNECT,
+	OP_LISTEN,
+	OP_ACCEPT,
+	OP_SENDMSG,
+	OP_RECVMSG,
+	OP_GETSOCKNAME,
+	OP_GETPEERNAME,
+	OP_GETSOCKOPT,
+	OP_SETSOCKOPT,
+	OP_SOCK_SHUTDOWN,
+
+	OP_PTRACE,
+
+	OP_EXEC,
+	OP_CHANGE_HAT,
+	OP_CHANGE_PROFILE,
+	OP_CHANGE_ONEXEC,
+
+	OP_SETPROCATTR,
+	OP_SETRLIMIT,
+
+	OP_PROF_REPL,
+	OP_PROF_LOAD,
+	OP_PROF_RM,
+};
+
+
+struct apparmor_audit_data {
+	int error;
+	int op;
+	int type;
+	void *profile;
+	const char *name;
+	const char *info;
+	union {
+		void *target;
+		struct {
+			long pos;
+			void *target;
+		} iface;
+		struct {
+			int rlim;
+			unsigned long max;
+		} rlim;
+		struct {
+			const char *target;
+			u32 request;
+			u32 denied;
+			kuid_t ouid;
+		} fs;
+	};
+};
+
+/* define a short hand for apparmor_audit_data structure */
+#define aad apparmor_audit_data
+
+void aa_audit_msg(int type, struct common_audit_data *sa,
+		  void (*cb) (struct audit_buffer *, void *));
+int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
+	     struct common_audit_data *sa,
+	     void (*cb) (struct audit_buffer *, void *));
+
+static inline int complain_error(int error)
+{
+	if (error == -EPERM || error == -EACCES)
+		return 0;
+	return error;
+}
+
+#endif /* __AA_AUDIT_H */
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
new file mode 100644
index 0000000..fc3fa38
--- /dev/null
+++ b/security/apparmor/include/capability.h
@@ -0,0 +1,48 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor capability mediation definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_CAPABILITY_H
+#define __AA_CAPABILITY_H
+
+#include <linux/sched.h>
+
+#include "apparmorfs.h"
+
+struct aa_profile;
+
+/* struct aa_caps - confinement data for capabilities
+ * @allow: capabilities mask
+ * @audit: caps that are to be audited
+ * @quiet: caps that should not be audited
+ * @kill: caps that when requested will result in the task being killed
+ * @extended: caps that are subject to finer-grained mediation
+ */
+struct aa_caps {
+	kernel_cap_t allow;
+	kernel_cap_t audit;
+	kernel_cap_t quiet;
+	kernel_cap_t kill;
+	kernel_cap_t extended;
+};
+
+extern struct aa_fs_entry aa_fs_entry_caps[];
+
+int aa_capable(struct aa_profile *profile, int cap, int audit);
+
+static inline void aa_free_cap_rules(struct aa_caps *caps)
+{
+	/* NOP */
+}
+
+#endif /* __AA_CAPABILITY_H */
diff --git a/security/apparmor/include/context.h b/security/apparmor/include/context.h
new file mode 100644
index 0000000..6bf6579
--- /dev/null
+++ b/security/apparmor/include/context.h
@@ -0,0 +1,178 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor contexts used to associate "labels" to objects.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_CONTEXT_H
+#define __AA_CONTEXT_H
+
+#include <linux/cred.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "policy.h"
+
+#define cred_cxt(X) (X)->security
+#define current_cxt() cred_cxt(current_cred())
+
+/* struct aa_file_cxt - the AppArmor context the file was opened in
+ * @perms: the permission the file was opened with
+ *
+ * The file_cxt could currently be directly stored in file->f_security
+ * as the profile reference is now stored in the f_cred.  However the
+ * cxt struct will expand in the future so we keep the struct.
+ */
+struct aa_file_cxt {
+	u16 allow;
+};
+
+/**
+ * aa_alloc_file_context - allocate file_cxt
+ * @gfp: gfp flags for allocation
+ *
+ * Returns: file_cxt or NULL on failure
+ */
+static inline struct aa_file_cxt *aa_alloc_file_context(gfp_t gfp)
+{
+	return kzalloc(sizeof(struct aa_file_cxt), gfp);
+}
+
+/**
+ * aa_free_file_context - free a file_cxt
+ * @cxt: file_cxt to free  (MAYBE_NULL)
+ */
+static inline void aa_free_file_context(struct aa_file_cxt *cxt)
+{
+	if (cxt)
+		kzfree(cxt);
+}
+
+/**
+ * struct aa_task_cxt - primary label for confined tasks
+ * @profile: the current profile   (NOT NULL)
+ * @onexec: profile to transition to on next exec  (MAYBE NULL)
+ * @previous: profile the task may return to     (MAYBE NULL)
+ * @token: magic value the task must know for returning to @previous
+ *
+ * Contains the task's current profile (which could change due to
+ * change_hat), plus the token needed during change_hat.
+ *
+ * TODO: make so a task can be confined by a stack of contexts
+ */
+struct aa_task_cxt {
+	struct aa_profile *profile;
+	struct aa_profile *onexec;
+	struct aa_profile *previous;
+	u64 token;
+};
+
+struct aa_task_cxt *aa_alloc_task_context(gfp_t flags);
+void aa_free_task_context(struct aa_task_cxt *cxt);
+void aa_dup_task_context(struct aa_task_cxt *new,
+			 const struct aa_task_cxt *old);
+int aa_replace_current_profile(struct aa_profile *profile);
+int aa_set_current_onexec(struct aa_profile *profile);
+int aa_set_current_hat(struct aa_profile *profile, u64 token);
+int aa_restore_previous_profile(u64 cookie);
+struct aa_profile *aa_get_task_profile(struct task_struct *task);
+
+
+/**
+ * aa_cred_profile - obtain cred's profiles
+ * @cred: cred to obtain profiles from  (NOT NULL)
+ *
+ * Returns: confining profile
+ *
+ * does NOT increment reference count
+ */
+static inline struct aa_profile *aa_cred_profile(const struct cred *cred)
+{
+	struct aa_task_cxt *cxt = cred_cxt(cred);
+	BUG_ON(!cxt || !cxt->profile);
+	return cxt->profile;
+}
+
+/**
+ * __aa_task_profile - retrieve another task's profile
+ * @task: task to query  (NOT NULL)
+ *
+ * Returns: @task's profile without incrementing its ref count
+ *
+ * If @task != current needs to be called in RCU safe critical section
+ */
+static inline struct aa_profile *__aa_task_profile(struct task_struct *task)
+{
+	return aa_cred_profile(__task_cred(task));
+}
+
+/**
+ * __aa_task_is_confined - determine if @task has any confinement
+ * @task: task to check confinement of  (NOT NULL)
+ *
+ * If @task != current needs to be called in RCU safe critical section
+ */
+static inline bool __aa_task_is_confined(struct task_struct *task)
+{
+	return !unconfined(__aa_task_profile(task));
+}
+
+/**
+ * __aa_current_profile - find the current task's confining profile
+ *
+ * Returns: up to date confining profile or the ns unconfined profile (NOT NULL)
+ *
+ * This fn will not update the task's cred to the most up to date version
+ * of the profile, so it is safe to call when inside of locks.
+ */
+static inline struct aa_profile *__aa_current_profile(void)
+{
+	return aa_cred_profile(current_cred());
+}
+
+/**
+ * aa_current_profile - find the current task's confining profile and do updates
+ *
+ * Returns: up to date confining profile or the ns unconfined profile (NOT NULL)
+ *
+ * This fn will update the task's cred structure if the profile has been
+ * replaced.  Not safe to call inside locks.
+ */
+static inline struct aa_profile *aa_current_profile(void)
+{
+	const struct aa_task_cxt *cxt = current_cxt();
+	struct aa_profile *profile;
+	BUG_ON(!cxt || !cxt->profile);
+
+	if (PROFILE_INVALID(cxt->profile)) {
+		profile = aa_get_newest_profile(cxt->profile);
+		aa_replace_current_profile(profile);
+		aa_put_profile(profile);
+		cxt = current_cxt();
+	}
+
+	return cxt->profile;
+}
+
+/**
+ * aa_clear_task_cxt_trans - clear transition tracking info from the cxt
+ * @cxt: task context to clear (NOT NULL)
+ */
+static inline void aa_clear_task_cxt_trans(struct aa_task_cxt *cxt)
+{
+	aa_put_profile(cxt->previous);
+	aa_put_profile(cxt->onexec);
+	cxt->previous = NULL;
+	cxt->onexec = NULL;
+	cxt->token = 0;
+}
+
+#endif /* __AA_CONTEXT_H */
diff --git a/security/apparmor/include/crypto.h b/security/apparmor/include/crypto.h
new file mode 100644
index 0000000..dc418e5
--- /dev/null
+++ b/security/apparmor/include/crypto.h
@@ -0,0 +1,36 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __APPARMOR_CRYPTO_H
+#define __APPARMOR_CRYPTO_H
+
+#include "policy.h"
+
+#ifdef CONFIG_SECURITY_APPARMOR_HASH
+unsigned int aa_hash_size(void);
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+			 size_t len);
+#else
+static inline int aa_calc_profile_hash(struct aa_profile *profile, u32 version,
+				       void *start, size_t len)
+{
+	return 0;
+}
+
+static inline unsigned int aa_hash_size(void)
+{
+	return 0;
+}
+#endif
+
+#endif /* __APPARMOR_CRYPTO_H */
diff --git a/security/apparmor/include/domain.h b/security/apparmor/include/domain.h
new file mode 100644
index 0000000..de04464
--- /dev/null
+++ b/security/apparmor/include/domain.h
@@ -0,0 +1,36 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security domain transition function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/binfmts.h>
+#include <linux/types.h>
+
+#ifndef __AA_DOMAIN_H
+#define __AA_DOMAIN_H
+
+struct aa_domain {
+	int size;
+	char **table;
+};
+
+int apparmor_bprm_set_creds(struct linux_binprm *bprm);
+int apparmor_bprm_secureexec(struct linux_binprm *bprm);
+void apparmor_bprm_committing_creds(struct linux_binprm *bprm);
+void apparmor_bprm_committed_creds(struct linux_binprm *bprm);
+
+void aa_free_domain_entries(struct aa_domain *domain);
+int aa_change_hat(const char *hats[], int count, u64 token, bool permtest);
+int aa_change_profile(const char *ns_name, const char *name, bool onexec,
+		      bool permtest);
+
+#endif /* __AA_DOMAIN_H */
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
new file mode 100644
index 0000000..2c922b8
--- /dev/null
+++ b/security/apparmor/include/file.h
@@ -0,0 +1,216 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor file mediation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_FILE_H
+#define __AA_FILE_H
+
+#include "domain.h"
+#include "match.h"
+
+struct aa_profile;
+struct path;
+
+/*
+ * We use MAY_EXEC, MAY_WRITE, MAY_READ, MAY_APPEND and the following flags
+ * for profile permissions
+ */
+#define AA_MAY_CREATE                  0x0010
+#define AA_MAY_DELETE                  0x0020
+#define AA_MAY_META_WRITE              0x0040
+#define AA_MAY_META_READ               0x0080
+
+#define AA_MAY_CHMOD                   0x0100
+#define AA_MAY_CHOWN                   0x0200
+#define AA_MAY_LOCK                    0x0400
+#define AA_EXEC_MMAP                   0x0800
+
+#define AA_MAY_LINK			0x1000
+#define AA_LINK_SUBSET			AA_MAY_LOCK	/* overlaid */
+#define AA_MAY_ONEXEC			0x40000000	/* exec allows onexec */
+#define AA_MAY_CHANGE_PROFILE		0x80000000
+#define AA_MAY_CHANGEHAT		0x80000000	/* ctrl auditing only */
+
+#define AA_AUDIT_FILE_MASK	(MAY_READ | MAY_WRITE | MAY_EXEC | MAY_APPEND |\
+				 AA_MAY_CREATE | AA_MAY_DELETE |	\
+				 AA_MAY_META_READ | AA_MAY_META_WRITE | \
+				 AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_LOCK | \
+				 AA_EXEC_MMAP | AA_MAY_LINK)
+
+/*
+ * The xindex is broken into 3 parts
+ * - index - an index into either the exec name table or the variable table
+ * - exec type - which determines how the executable name and index are used
+ * - flags - which modify how the destination name is applied
+ */
+#define AA_X_INDEX_MASK		0x03ff
+
+#define AA_X_TYPE_MASK		0x0c00
+#define AA_X_TYPE_SHIFT		10
+#define AA_X_NONE		0x0000
+#define AA_X_NAME		0x0400	/* use executable name px */
+#define AA_X_TABLE		0x0800	/* use a specified name ->n# */
+
+#define AA_X_UNSAFE		0x1000
+#define AA_X_CHILD		0x2000	/* make >AA_X_NONE apply to children */
+#define AA_X_INHERIT		0x4000
+#define AA_X_UNCONFINED		0x8000
+
+/* AA_SECURE_X_NEEDED - is passed in the bprm->unsafe field */
+#define AA_SECURE_X_NEEDED	0x8000
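+
+/*
+ * Worked xindex examples (illustrative): AA_X_TABLE | 2 selects entry 2
+ * of the profile's transition table; AA_X_NAME | AA_X_CHILD attaches by
+ * executable name among the profile's children; either form may be
+ * or'd with AA_X_UNSAFE to skip the secure-exec environment scrubbing.
+ */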
+
+/* TODO: make it conditional which of these fields are set */
+struct path_cond {
+	kuid_t uid;
+	umode_t mode;
+};
+
+/* struct file_perms - file permission
+ * @allow: mask of permissions that are allowed
+ * @audit: mask of permissions to force an audit message for
+ * @quiet: mask of permissions to quiet audit messages for
+ * @kill: mask of permissions that when matched will kill the task
+ * @xindex: exec transition index if @allow contains MAY_EXEC
+ *
+ * The @audit and @quiet masks should be mutually exclusive.
+ */
+struct file_perms {
+	u32 allow;
+	u32 audit;
+	u32 quiet;
+	u32 kill;
+	u16 xindex;
+};
+
+extern struct file_perms nullperms;
+
+#define COMBINED_PERM_MASK(X) ((X).allow | (X).audit | (X).quiet | (X).kill)
+
+/* FIXME: split perms from dfa and match this to description
+ *        also add delegation info.
+ */
+static inline u16 dfa_map_xindex(u16 mask)
+{
+	u16 old_index = (mask >> 10) & 0xf;
+	u16 index = 0;
+
+	if (mask & 0x100)
+		index |= AA_X_UNSAFE;
+	if (mask & 0x200)
+		index |= AA_X_INHERIT;
+	if (mask & 0x80)
+		index |= AA_X_UNCONFINED;
+
+	if (old_index == 1) {
+		index |= AA_X_UNCONFINED;
+	} else if (old_index == 2) {
+		index |= AA_X_NAME;
+	} else if (old_index == 3) {
+		index |= AA_X_NAME | AA_X_CHILD;
+	} else if (old_index) {
+		index |= AA_X_TABLE;
+		index |= old_index - 4;
+	}
+
+	return index;
+}
+
+/*
+ * map old dfa inline permissions to new format
+ */
+#define dfa_user_allow(dfa, state) (((ACCEPT_TABLE(dfa)[state]) & 0x7f) | \
+				    ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
+#define dfa_user_audit(dfa, state) ((ACCEPT_TABLE2(dfa)[state]) & 0x7f)
+#define dfa_user_quiet(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 7) & 0x7f)
+#define dfa_user_xindex(dfa, state) \
+	(dfa_map_xindex(ACCEPT_TABLE(dfa)[state] & 0x3fff))
+
+#define dfa_other_allow(dfa, state) ((((ACCEPT_TABLE(dfa)[state]) >> 14) & \
+				      0x7f) |				\
+				     ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
+#define dfa_other_audit(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 14) & 0x7f)
+#define dfa_other_quiet(dfa, state) \
+	((((ACCEPT_TABLE2(dfa)[state]) >> 7) >> 14) & 0x7f)
+#define dfa_other_xindex(dfa, state) \
+	dfa_map_xindex((ACCEPT_TABLE(dfa)[state] >> 14) & 0x3fff)
+
+int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
+		  gfp_t gfp, int op, u32 request, const char *name,
+		  const char *target, kuid_t ouid, const char *info, int error);
+
+/**
+ * struct aa_file_rules - components used for file rule permissions
+ * @dfa: dfa to match path names and conditionals against
+ * @perms: permission table indexed by the matched state accept entry of @dfa
+ * @trans: transition table indexed by named x transitions
+ *
+ * File permissions are determined by matching a path against @dfa and
+ * then using the value of the accept entry for the matching state as
+ * an index into @perms.  If a named exec transition is required it is
+ * looked up in the transition table.
+ */
+struct aa_file_rules {
+	unsigned int start;
+	struct aa_dfa *dfa;
+	/* struct perms perms; */
+	struct aa_domain trans;
+	/* TODO: add delegate table */
+};
+
+unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
+			  const char *name, struct path_cond *cond,
+			  struct file_perms *perms);
+
+int aa_path_perm(int op, struct aa_profile *profile, struct path *path,
+		 int flags, u32 request, struct path_cond *cond);
+
+int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
+		 struct path *new_dir, struct dentry *new_dentry);
+
+int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
+		 u32 request);
+
+static inline void aa_free_file_rules(struct aa_file_rules *rules)
+{
+	aa_put_dfa(rules->dfa);
+	aa_free_domain_entries(&rules->trans);
+}
+
+/**
+ * aa_map_file_to_perms - map file flags to AppArmor permissions
+ * @file: open file to map flags to AppArmor permissions
+ *
+ * Returns: apparmor permission set for the file
+ */
+static inline u32 aa_map_file_to_perms(struct file *file)
+{
+	int flags = file->f_flags;
+	u32 perms = 0;
+
+	if (file->f_mode & FMODE_WRITE)
+		perms |= MAY_WRITE;
+	if (file->f_mode & FMODE_READ)
+		perms |= MAY_READ;
+
+	if ((flags & O_APPEND) && (perms & MAY_WRITE))
+		perms = (perms & ~MAY_WRITE) | MAY_APPEND;
+	/* trunc implies write permission */
+	if (flags & O_TRUNC)
+		perms |= MAY_WRITE;
+	if (flags & O_CREAT)
+		perms |= AA_MAY_CREATE;
+
+	return perms;
+}
+
+#endif /* __AA_FILE_H */
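
The append and truncate special cases in aa_map_file_to_perms() are easy to
misread, so here is an illustrative sketch of the expected results for a few
common open modes, assuming f_mode was derived from f_flags by the usual
open(2) path:

	/*
	 *   open(path, O_RDONLY)            -> MAY_READ
	 *   open(path, O_WRONLY | O_APPEND) -> MAY_APPEND
	 *   open(path, O_RDWR | O_TRUNC)    -> MAY_READ | MAY_WRITE
	 *   open(path, O_WRONLY | O_CREAT)  -> MAY_WRITE | AA_MAY_CREATE
	 */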
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
new file mode 100644
index 0000000..288ca76
--- /dev/null
+++ b/security/apparmor/include/ipc.h
@@ -0,0 +1,28 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor ipc mediation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_IPC_H
+#define __AA_IPC_H
+
+#include <linux/sched.h>
+
+struct aa_profile;
+
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+		  unsigned int mode);
+
+int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
+	      unsigned int mode);
+
+#endif /* __AA_IPC_H */
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
new file mode 100644
index 0000000..001c43a
--- /dev/null
+++ b/security/apparmor/include/match.h
@@ -0,0 +1,141 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy dfa matching engine definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2012 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_MATCH_H
+#define __AA_MATCH_H
+
+#include <linux/kref.h>
+
+#define DFA_NOMATCH			0
+#define DFA_START			1
+
+
+/**
+ * The format used for transition tables is based on the GNU flex table
+ * file format (--tables-file option; see Table File Format in the flex
+ * info pages and the flex sources for documentation). The magic number
+ * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
+ * new tables have been defined, and the YY_ID_CHK (check) and YY_ID_DEF
+ * (default) tables are used slightly differently (see the apparmor-parser
+ * package).
+ *
+ *
+ * The data in the packed dfa is stored in network byte order, and the tables
+ * are arranged for flexibility.  We convert the table data to host native
+ * byte order.
+ *
+ * The dfa begins with a table set header, and is followed by the actual
+ * tables.
+ */
+
+#define YYTH_MAGIC	0x1B5E783D
+
+struct table_set_header {
+	u32 th_magic;		/* YYTH_MAGIC */
+	u32 th_hsize;
+	u32 th_ssize;
+	u16 th_flags;
+	char th_version[];
+};
+
+/* The YYTD_ID values are one less than the flex table mappings.  The flex
+ * id has 1 subtracted at table load time, which allows us to use the IDs
+ * directly as indexes.
+ */
+#define	YYTD_ID_ACCEPT	0
+#define YYTD_ID_BASE	1
+#define YYTD_ID_CHK	2
+#define YYTD_ID_DEF	3
+#define YYTD_ID_EC	4
+#define YYTD_ID_META	5
+#define YYTD_ID_ACCEPT2 6
+#define YYTD_ID_NXT	7
+#define YYTD_ID_TSIZE	8
+
+#define YYTD_DATA8	1
+#define YYTD_DATA16	2
+#define YYTD_DATA32	4
+#define YYTD_DATA64	8
+
+/* The ACCEPT & ACCEPT2 tables each get 6 dedicated flags; the YYTD_DATAX
+ * values define the first flags.
+ */
+#define ACCEPT1_FLAGS(X) ((X) & 0x3f)
+#define ACCEPT2_FLAGS(X) ACCEPT1_FLAGS((X) >> YYTD_ID_ACCEPT2)
+#define TO_ACCEPT1_FLAG(X) ACCEPT1_FLAGS(X)
+#define TO_ACCEPT2_FLAG(X) (ACCEPT1_FLAGS(X) << YYTD_ID_ACCEPT2)
+#define DFA_FLAG_VERIFY_STATES 0x1000
+
+struct table_header {
+	u16 td_id;
+	u16 td_flags;
+	u32 td_hilen;
+	u32 td_lolen;
+	char td_data[];
+};
+
+#define DEFAULT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_DEF]->td_data))
+#define BASE_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_BASE]->td_data))
+#define NEXT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_NXT]->td_data))
+#define CHECK_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_CHK]->td_data))
+#define EQUIV_TABLE(DFA) ((u8 *)((DFA)->tables[YYTD_ID_EC]->td_data))
+#define ACCEPT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT]->td_data))
+#define ACCEPT_TABLE2(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT2]->td_data))
+
+struct aa_dfa {
+	struct kref count;
+	u16 flags;
+	struct table_header *tables[YYTD_ID_TSIZE];
+};
+
+#define byte_to_byte(X) (X)
+
+#define UNPACK_ARRAY(TABLE, BLOB, LEN, TYPE, NTOHX) \
+	do { \
+		typeof(LEN) __i; \
+		TYPE *__t = (TYPE *) TABLE; \
+		TYPE *__b = (TYPE *) BLOB; \
+		for (__i = 0; __i < LEN; __i++) { \
+			__t[__i] = NTOHX(__b[__i]); \
+		} \
+	} while (0)
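
/*
 * Usage sketch (assuming the caller pattern in match.c below): convert a
 * 16-bit table from the packed network-byte-order blob into a host-order
 * table in place:
 *
 *	UNPACK_ARRAY(table->td_data, blob, count, u16, be16_to_cpu);
 */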
+
+static inline size_t table_size(size_t len, size_t el_size)
+{
+	return ALIGN(sizeof(struct table_header) + len * el_size, 8);
+}
+
+struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags);
+unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
+			      const char *str, int len);
+unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
+			  const char *str);
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+			 const char c);
+
+void aa_dfa_free_kref(struct kref *kref);
+
+/**
+ * aa_put_dfa - put a dfa refcount
+ * @dfa: dfa to put refcount   (MAYBE NULL)
+ *
+ * Requires: if @dfa != NULL that a valid refcount be held
+ */
+static inline void aa_put_dfa(struct aa_dfa *dfa)
+{
+	if (dfa)
+		kref_put(&dfa->count, aa_dfa_free_kref);
+}
+
+#endif /* __AA_MATCH_H */
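
To make the table layout concrete, here is a minimal sketch of how a single
DFA transition is taken over these tables; the authoritative walk is
aa_dfa_match() in match.c, which also defines the base_idx() helper assumed
here, and the equivalence-class remap only applies when an EC table was
packed:

	static unsigned int dfa_step_sketch(struct aa_dfa *dfa,
					    unsigned int state,
					    unsigned char c)
	{
		u16 *def = DEFAULT_TABLE(dfa);
		u32 *base = BASE_TABLE(dfa);
		u16 *next = NEXT_TABLE(dfa);
		u16 *check = CHECK_TABLE(dfa);
		unsigned int pos;

		/* remap the input byte through the equivalence classes */
		if (dfa->tables[YYTD_ID_EC])
			c = EQUIV_TABLE(dfa)[c];

		/* take the checked transition if it belongs to @state,
		 * else fall back to the state's default transition */
		pos = base_idx(base[state]) + c;
		if (check[pos] == state)
			return next[pos];
		return def[state];
	}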
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
new file mode 100644
index 0000000..286ac75
--- /dev/null
+++ b/security/apparmor/include/path.h
@@ -0,0 +1,32 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor basic path manipulation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_PATH_H
+#define __AA_PATH_H
+
+
+enum path_flags {
+	PATH_IS_DIR = 0x1,		/* path is a directory */
+	PATH_CONNECT_PATH = 0x4,	/* connect disconnected paths to / */
+	PATH_CHROOT_REL = 0x8,		/* do path lookup relative to chroot */
+	PATH_CHROOT_NSCONNECT = 0x10,	/* connect paths that are at ns root */
+
+	PATH_DELEGATE_DELETED = 0x08000, /* delegate deleted files */
+	PATH_MEDIATE_DELETED = 0x10000,	/* mediate deleted paths */
+};
+
+int aa_path_name(struct path *path, int flags, char **buffer,
+		 const char **name, const char **info);
+
+#endif /* __AA_PATH_H */
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
new file mode 100644
index 0000000..c28b0f2
--- /dev/null
+++ b/security/apparmor/include/policy.h
@@ -0,0 +1,408 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_POLICY_H
+#define __AA_POLICY_H
+
+#include <linux/capability.h>
+#include <linux/cred.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+
+#include "apparmor.h"
+#include "audit.h"
+#include "capability.h"
+#include "domain.h"
+#include "file.h"
+#include "resource.h"
+
+extern const char *const aa_profile_mode_names[];
+#define APPARMOR_MODE_NAMES_MAX_INDEX 4
+
+#define PROFILE_MODE(_profile, _mode)		\
+	((aa_g_profile_mode == (_mode)) ||	\
+	 ((_profile)->mode == (_mode)))
+
+#define COMPLAIN_MODE(_profile)	PROFILE_MODE((_profile), APPARMOR_COMPLAIN)
+
+#define KILL_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_KILL)
+
+#define PROFILE_IS_HAT(_profile) ((_profile)->flags & PFLAG_HAT)
+
+#define PROFILE_INVALID(_profile) ((_profile)->flags & PFLAG_INVALID)
+
+#define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2)
+
+/*
+ * FIXME: currently need a clean way to replace and remove profiles as a
+ * set.  It should be done at the namespace level.
+ * Either, with a set of profiles loaded at the namespace level or via
+ * a mark and remove marked interface.
+ */
+enum profile_mode {
+	APPARMOR_ENFORCE,	/* enforce access rules */
+	APPARMOR_COMPLAIN,	/* allow and log access violations */
+	APPARMOR_KILL,		/* kill task on access violation */
+	APPARMOR_UNCONFINED,	/* profile set to unconfined */
+};
+
+enum profile_flags {
+	PFLAG_HAT = 1,			/* profile is a hat */
+	PFLAG_NULL = 4,			/* profile is null learning profile */
+	PFLAG_IX_ON_NAME_ERROR = 8,	/* fallback to ix on name lookup fail */
+	PFLAG_IMMUTABLE = 0x10,		/* don't allow changes/replacement */
+	PFLAG_USER_DEFINED = 0x20,	/* user based profile - lower privs */
+	PFLAG_NO_LIST_REF = 0x40,	/* list doesn't keep profile ref */
+	PFLAG_OLD_NULL_TRANS = 0x100,	/* use // as the null transition */
+	PFLAG_INVALID = 0x200,		/* profile replaced/removed */
+	PFLAG_NS_COUNT = 0x400,		/* carries NS ref count */
+
+	/* These flags must correspond with PATH_flags */
+	PFLAG_MEDIATE_DELETED = 0x10000, /* mediate instead of delegating deleted */
+};
+
+struct aa_profile;
+
+/* struct aa_policy - common part of both namespaces and profiles
+ * @name: name of the object
+ * @hname: the hierarchical name
+ * @list: list policy object is on
+ * @profiles: head of the profiles list contained in the object
+ */
+struct aa_policy {
+	char *name;
+	char *hname;
+	struct list_head list;
+	struct list_head profiles;
+};
+
+/* struct aa_ns_acct - accounting of profiles in namespace
+ * @max_size: maximum space allowed for all profiles in namespace
+ * @max_count: maximum number of profiles that can be in this namespace
+ * @size: current size of profiles
+ * @count: current count of profiles (includes null profiles)
+ */
+struct aa_ns_acct {
+	int max_size;
+	int max_count;
+	int size;
+	int count;
+};
+
+/* struct aa_namespace - namespace for a set of profiles
+ * @base: common policy
+ * @parent: parent of namespace
+ * @lock: lock for modifying the object
+ * @acct: accounting for the namespace
+ * @unconfined: special unconfined profile for the namespace
+ * @sub_ns: list of namespaces under the current namespace.
+ * @uniq_null: uniq value used for null learning profiles
+ * @uniq_id: a unique id count for the profiles in the namespace
+ * @dents: dentries for the namespaces file entries in apparmorfs
+ *
+ * An aa_namespace defines the set of profiles that are searched to
+ * determine which profile to attach to a task.  Profiles can not be
+ * shared between aa_namespaces, and profile names within a namespace are
+ * guaranteed to be unique.  When profiles in separate namespaces have the
+ * same name they are NOT considered to be equivalent.
+ *
+ * Namespaces are hierarchical and only namespaces and profiles below the
+ * current namespace are visible.
+ *
+ * Namespace names must be unique and can not contain the characters :/\0
+ *
+ * FIXME TODO: add vserver support of namespaces (can it all be done in
+ *             userspace?)
+ */
+struct aa_namespace {
+	struct aa_policy base;
+	struct aa_namespace *parent;
+	struct mutex lock;
+	struct aa_ns_acct acct;
+	struct aa_profile *unconfined;
+	struct list_head sub_ns;
+	atomic_t uniq_null;
+	long uniq_id;
+
+	struct dentry *dents[AAFS_NS_SIZEOF];
+};
+
+/* struct aa_policydb - match engine for a policy
+ * @dfa: dfa pattern match
+ * @start: set of start states for the different classes of data
+ */
+struct aa_policydb {
+	/* Generic policy DFA specific rule types will be subsections of it */
+	struct aa_dfa *dfa;
+	unsigned int start[AA_CLASS_LAST + 1];
+
+};
+
+struct aa_replacedby {
+	struct kref count;
+	struct aa_profile __rcu *profile;
+};
+
+
+/* struct aa_profile - basic confinement data
+ * @base: base components of the profile (name, refcount, lists, lock ...)
+ * @count: reference count of the obj
+ * @rcu: rcu head used when removing from @list
+ * @parent: parent of profile
+ * @ns: namespace the profile is in
+ * @replacedby: is set to the profile that replaced this profile
+ * @rename: optional profile name that this profile renamed
+ * @attach: human readable attachment string
+ * @xmatch: optional extended matching for unconfined executables names
+ * @xmatch_len: xmatch prefix len, used to determine xmatch priority
+ * @audit: the auditing mode of the profile
+ * @mode: the enforcement mode of the profile
+ * @flags: flags controlling profile behavior
+ * @path_flags: flags controlling path generation behavior
+ * @size: the memory consumed by this profiles rules
+ * @policy: general match rules governing policy
+ * @file: The set of rules governing basic file access and domain transitions
+ * @caps: capabilities for the profile
+ * @rlimits: rlimits for the profile
+ *
+ * @dents: dentries for the profiles file entries in apparmorfs
+ * @dirname: name of the profile dir in apparmorfs
+ *
+ * The AppArmor profile contains the basic confinement data.  Each profile
+ * has a name, and exists in a namespace.  The @name and @xmatch are
+ * used to determine profile attachment against unconfined tasks.  All other
+ * attachments are determined by profile X transition rules.
+ *
+ * The @replacedby struct is write protected by the profile lock.
+ *
+ * Profiles have a hierarchy where hats and children profiles keep
+ * a reference to their parent.
+ *
+ * Profile names can not begin with a : and can not contain the \0
+ * character.  If a profile name begins with / it will be considered when
+ * determining profile attachment on "unconfined" tasks.
+ */
+struct aa_profile {
+	struct aa_policy base;
+	struct kref count;
+	struct rcu_head rcu;
+	struct aa_profile __rcu *parent;
+
+	struct aa_namespace *ns;
+	struct aa_replacedby *replacedby;
+	const char *rename;
+
+	const char *attach;
+	struct aa_dfa *xmatch;
+	int xmatch_len;
+	enum audit_mode audit;
+	long mode;
+	long flags;
+	u32 path_flags;
+	int size;
+
+	struct aa_policydb policy;
+	struct aa_file_rules file;
+	struct aa_caps caps;
+	struct aa_rlimit rlimits;
+
+	unsigned char *hash;
+	char *dirname;
+	struct dentry *dents[AAFS_PROF_SIZEOF];
+};
+
+extern struct aa_namespace *root_ns;
+extern enum profile_mode aa_g_profile_mode;
+
+void aa_add_profile(struct aa_policy *common, struct aa_profile *profile);
+
+bool aa_ns_visible(struct aa_namespace *curr, struct aa_namespace *view);
+const char *aa_ns_name(struct aa_namespace *parent, struct aa_namespace *child);
+int aa_alloc_root_ns(void);
+void aa_free_root_ns(void);
+void aa_free_namespace_kref(struct kref *kref);
+
+struct aa_namespace *aa_find_namespace(struct aa_namespace *root,
+				       const char *name);
+
+
+void aa_free_replacedby_kref(struct kref *kref);
+struct aa_profile *aa_alloc_profile(const char *name);
+struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat);
+void aa_free_profile(struct aa_profile *profile);
+void aa_free_profile_kref(struct kref *kref);
+struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name);
+struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *name);
+struct aa_profile *aa_match_profile(struct aa_namespace *ns, const char *name);
+
+ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace);
+ssize_t aa_remove_profiles(char *name, size_t size);
+
+#define PROF_ADD 1
+#define PROF_REPLACE 0
+
+#define unconfined(X) ((X)->mode == APPARMOR_UNCONFINED)
+
+
+static inline struct aa_profile *aa_deref_parent(struct aa_profile *p)
+{
+	return rcu_dereference_protected(p->parent,
+					 mutex_is_locked(&p->ns->lock));
+}
+
+/**
+ * aa_get_profile - increment refcount on profile @p
+ * @p: profile  (MAYBE NULL)
+ *
+ * Returns: pointer to @p; if @p is NULL, returns NULL
+ * Requires: @p must be held with valid refcount when called
+ */
+static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
+{
+	if (p)
+		kref_get(&(p->count));
+
+	return p;
+}
+
+/**
+ * aa_get_profile_not0 - increment refcount on profile @p found via lookup
+ * @p: profile  (MAYBE NULL)
+ *
+ * Returns: pointer to @p; if @p is NULL, returns NULL
+ * Requires: @p must be held with valid refcount when called
+ */
+static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
+{
+	if (p && kref_get_not0(&p->count))
+		return p;
+
+	return NULL;
+}
+
+/**
+ * aa_get_profile_rcu - increment the refcount of a profile that can be replaced
+ * @p: pointer to profile that can be replaced (NOT NULL)
+ *
+ * Returns: pointer to a refcounted profile,
+ *     else NULL if no profile
+ */
+static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
+{
+	struct aa_profile *c;
+
+	rcu_read_lock();
+	do {
+		c = rcu_dereference(*p);
+	} while (c && !kref_get_not0(&c->count));
+	rcu_read_unlock();
+
+	return c;
+}
+
+/**
+ * aa_get_newest_profile - find the newest version of @p
+ * @p: the profile to check for newer versions of
+ *
+ * Returns: refcounted newest version of @p, taking into account
+ *          replacement, renames and removals; if there is no newer
+ *          version, returns refcounted @p.
+ */
+static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
+{
+	if (!p)
+		return NULL;
+
+	if (PROFILE_INVALID(p))
+		return aa_get_profile_rcu(&p->replacedby->profile);
+
+	return aa_get_profile(p);
+}
+
+/**
+ * aa_put_profile - decrement refcount on profile @p
+ * @p: profile  (MAYBE NULL)
+ */
+static inline void aa_put_profile(struct aa_profile *p)
+{
+	if (p)
+		kref_put(&p->count, aa_free_profile_kref);
+}
+
+static inline struct aa_replacedby *aa_get_replacedby(struct aa_replacedby *p)
+{
+	if (p)
+		kref_get(&(p->count));
+
+	return p;
+}
+
+static inline void aa_put_replacedby(struct aa_replacedby *p)
+{
+	if (p)
+		kref_put(&p->count, aa_free_replacedby_kref);
+}
+
+/* requires profile list write lock held */
+static inline void __aa_update_replacedby(struct aa_profile *orig,
+					  struct aa_profile *new)
+{
+	struct aa_profile *tmp;
+	tmp = rcu_dereference_protected(orig->replacedby->profile,
+					mutex_is_locked(&orig->ns->lock));
+	rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
+	orig->flags |= PFLAG_INVALID;
+	aa_put_profile(tmp);
+}
+
+/**
+ * aa_get_namespace - increment reference count on @ns
+ * @ns: namespace to increment reference count of (MAYBE NULL)
+ *
+ * Returns: pointer to @ns; if @ns is NULL, returns NULL
+ * Requires: @ns must be held with valid refcount when called
+ */
+static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns)
+{
+	if (ns)
+		aa_get_profile(ns->unconfined);
+
+	return ns;
+}
+
+/**
+ * aa_put_namespace - decrement refcount on @ns
+ * @ns: namespace to put reference of
+ *
+ * Decrement the reference count of @ns and free it if no longer in use
+ */
+static inline void aa_put_namespace(struct aa_namespace *ns)
+{
+	if (ns)
+		aa_put_profile(ns->unconfined);
+}
+
+static inline int AUDIT_MODE(struct aa_profile *profile)
+{
+	if (aa_g_audit != AUDIT_NORMAL)
+		return aa_g_audit;
+
+	return profile->audit;
+}
+
+bool aa_may_manage_policy(int op);
+
+#endif /* __AA_POLICY_H */
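
Because a replaced profile is only marked PFLAG_INVALID and redirected
through @replacedby, a long-held reference can go stale.  A minimal usage
sketch of the re-fetch pattern (the helper name here is illustrative):

	static void mediate_with_live_profile(struct aa_profile *held)
	{
		/* swap the possibly-stale reference for the newest version */
		struct aa_profile *live = aa_get_newest_profile(held);

		if (live) {
			/* ... mediate against @live here ... */
			aa_put_profile(live);	/* drop the ref we took */
		}
	}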
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
new file mode 100644
index 0000000..c214fb8
--- /dev/null
+++ b/security/apparmor/include/policy_unpack.h
@@ -0,0 +1,39 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __POLICY_INTERFACE_H
+#define __POLICY_INTERFACE_H
+
+#include <linux/list.h>
+
+struct aa_load_ent {
+	struct list_head list;
+	struct aa_profile *new;
+	struct aa_profile *old;
+	struct aa_profile *rename;
+};
+
+void aa_load_ent_free(struct aa_load_ent *ent);
+struct aa_load_ent *aa_load_ent_alloc(void);
+
+#define PACKED_FLAG_HAT		1
+
+#define PACKED_MODE_ENFORCE	0
+#define PACKED_MODE_COMPLAIN	1
+#define PACKED_MODE_KILL	2
+#define PACKED_MODE_UNCONFINED	3
+
+int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns);
+
+#endif /* __POLICY_INTERFACE_H */
diff --git a/security/apparmor/include/procattr.h b/security/apparmor/include/procattr.h
new file mode 100644
index 0000000..6bd5f33
--- /dev/null
+++ b/security/apparmor/include/procattr.h
@@ -0,0 +1,25 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /proc/<pid>/attr/ interface function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_PROCATTR_H
+#define __AA_PROCATTR_H
+
+#define AA_DO_TEST 1
+#define AA_ONEXEC  1
+
+int aa_getprocattr(struct aa_profile *profile, char **string);
+int aa_setprocattr_changehat(char *args, size_t size, int test);
+int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test);
+
+#endif /* __AA_PROCATTR_H */
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
new file mode 100644
index 0000000..d3f4cf0
--- /dev/null
+++ b/security/apparmor/include/resource.h
@@ -0,0 +1,50 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor resource limits function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_RESOURCE_H
+#define __AA_RESOURCE_H
+
+#include <linux/resource.h>
+#include <linux/sched.h>
+
+#include "apparmorfs.h"
+
+struct aa_profile;
+
+/* struct aa_rlimit - rlimit settings for the profile
+ * @mask: which hard limits to set
+ * @limits: rlimit values that override task limits
+ *
+ * AppArmor rlimits are used to set confined task rlimits.  Only the
+ * limits specified in @mask will be controlled by apparmor.
+ */
+struct aa_rlimit {
+	unsigned int mask;
+	struct rlimit limits[RLIM_NLIMITS];
+};
+
+extern struct aa_fs_entry aa_fs_entry_rlimit[];
+
+int aa_map_resource(int resource);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+		      unsigned int resource, struct rlimit *new_rlim);
+
+void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
+
+static inline void aa_free_rlimit_rules(struct aa_rlimit *rlims)
+{
+	/* NOP */
+}
+
+#endif /* __AA_RESOURCE_H */
diff --git a/security/apparmor/include/sid.h b/security/apparmor/include/sid.h
new file mode 100644
index 0000000..513ca0e
--- /dev/null
+++ b/security/apparmor/include/sid.h
@@ -0,0 +1,26 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security identifier (sid) definitions
+ *
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_SID_H
+#define __AA_SID_H
+
+#include <linux/types.h>
+
+/* sid value that will not be allocated */
+#define AA_SID_INVALID 0
+#define AA_SID_ALLOC AA_SID_INVALID
+
+u32 aa_alloc_sid(void);
+void aa_free_sid(u32 sid);
+
+#endif /* __AA_SID_H */
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
new file mode 100644
index 0000000..777ac1c
--- /dev/null
+++ b/security/apparmor/ipc.c
@@ -0,0 +1,111 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor ipc mediation
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/gfp.h>
+#include <linux/ptrace.h>
+
+#include "include/audit.h"
+#include "include/capability.h"
+#include "include/context.h"
+#include "include/policy.h"
+#include "include/ipc.h"
+
+/* call back to audit ptrace fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+	struct common_audit_data *sa = va;
+	audit_log_format(ab, " target=");
+	audit_log_untrustedstring(ab, sa->aad->target);
+}
+
+/**
+ * aa_audit_ptrace - do auditing for ptrace
+ * @profile: profile being enforced  (NOT NULL)
+ * @target: profile being traced (NOT NULL)
+ * @error: error condition
+ *
+ * Returns: %0 or error code
+ */
+static int aa_audit_ptrace(struct aa_profile *profile,
+			   struct aa_profile *target, int error)
+{
+	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
+	sa.type = LSM_AUDIT_DATA_NONE;
+	sa.aad = &aad;
+	aad.op = OP_PTRACE;
+	aad.target = target;
+	aad.error = error;
+
+	return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_ATOMIC, &sa,
+			audit_cb);
+}
+
+/**
+ * aa_may_ptrace - test if tracer task can trace the tracee
+ * @tracer: profile of the task doing the tracing  (NOT NULL)
+ * @tracee: task to be traced
+ * @mode: PTRACE_MODE_READ or PTRACE_MODE_ATTACH
+ *
+ * Returns: %0 on success, or an error code on denial or other error
+ */
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+		  unsigned int mode)
+{
+	/* TODO: currently only based on capability, not extended ptrace
+	 *       rules,
+	 *       Test mode for PTRACE_MODE_READ || PTRACE_MODE_ATTACH
+	 */
+
+	if (unconfined(tracer) || tracer == tracee)
+		return 0;
+	/* log this capability request */
+	return aa_capable(tracer, CAP_SYS_PTRACE, 1);
+}
+
+/**
+ * aa_ptrace - do ptrace permission check and auditing
+ * @tracer: task doing the tracing (NOT NULL)
+ * @tracee: task being traced (NOT NULL)
+ * @mode: ptrace mode, either PTRACE_MODE_READ or PTRACE_MODE_ATTACH
+ *
+ * Returns: %0 on success, or an error code on denial or other error
+ */
+int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
+	      unsigned int mode)
+{
+	/*
+	 * tracer can ptrace tracee when
+	 * - tracer is unconfined ||
+	 * - tracer is in complain mode ||
+	 * - tracer has rules allowing it to trace tracee; currently this is:
+	 *     - confined by the same profile ||
+	 *     - tracer profile has CAP_SYS_PTRACE
+	 */
+
+	struct aa_profile *tracer_p = aa_get_task_profile(tracer);
+	int error = 0;
+
+	if (!unconfined(tracer_p)) {
+		struct aa_profile *tracee_p = aa_get_task_profile(tracee);
+
+		error = aa_may_ptrace(tracer_p, tracee_p, mode);
+		error = aa_audit_ptrace(tracer_p, tracee_p, error);
+
+		aa_put_profile(tracee_p);
+	}
+	aa_put_profile(tracer_p);
+
+	return error;
+}
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
new file mode 100644
index 0000000..c1827e0
--- /dev/null
+++ b/security/apparmor/lib.c
@@ -0,0 +1,106 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains basic common functions used in AppArmor
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include "include/audit.h"
+#include "include/apparmor.h"
+
+
+/**
+ * aa_split_fqname - split an fqname into a profile and namespace name
+ * @fqname: a fully qualified name in namespace profile format (NOT NULL)
+ * @ns_name: pointer to portion of the string containing the ns name (NOT NULL)
+ *
+ * Returns: profile name or NULL if one is not specified
+ *
+ * Split a namespace name from a profile name (see policy.c for naming
+ * description).  If a portion of the name is missing it returns NULL for
+ * that portion.
+ *
+ * NOTE: may modify the @fqname string.  The pointers returned point
+ *       into the @fqname string.
+ */
+char *aa_split_fqname(char *fqname, char **ns_name)
+{
+	char *name = strim(fqname);
+
+	*ns_name = NULL;
+	if (name[0] == ':') {
+		char *split = strchr(&name[1], ':');
+		*ns_name = skip_spaces(&name[1]);
+		if (split) {
+			/* overwrite ':' with \0 */
+			*split++ = 0;
+			if (strncmp(split, "//", 2) == 0)
+				split += 2;
+			name = skip_spaces(split);
+		} else
+			/* a ns name without a following profile is allowed */
+			name = NULL;
+	}
+	if (name && *name == 0)
+		name = NULL;
+
+	return name;
+}
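
/*
 * Worked example (an illustrative sketch; the buffer must be writable,
 * since the separators are overwritten in place):
 */
static void split_fqname_example(void)
{
	char buf[] = ":ns://profile";
	char buf2[] = ":ns:";
	char *ns_name, *name;

	name = aa_split_fqname(buf, &ns_name);
	/* now ns_name == "ns" and name == "profile" */

	name = aa_split_fqname(buf2, &ns_name);
	/* ns_name == "ns", name == NULL: a bare namespace name is allowed */
}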
+
+/**
+ * aa_info_message - log a non-profile-related status message
+ * @str: message to log
+ */
+void aa_info_message(const char *str)
+{
+	if (audit_enabled) {
+		struct common_audit_data sa;
+		struct apparmor_audit_data aad = {0,};
+		sa.type = LSM_AUDIT_DATA_NONE;
+		sa.aad = &aad;
+		aad.info = str;
+		aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
+	}
+	printk(KERN_INFO "AppArmor: %s\n", str);
+}
+
+/**
+ * __aa_kvmalloc - do allocation preferring kmalloc but falling back to vmalloc
+ * @size: how many bytes of memory are required
+ * @flags: the type of memory to allocate (see kmalloc).
+ *
+ * Return: allocated buffer or NULL if failed
+ *
+ * It is possible that the policy being loaded from userspace is larger
+ * than what can be allocated by kmalloc; in those cases, fall back to
+ * vmalloc.
+ */
+void *__aa_kvmalloc(size_t size, gfp_t flags)
+{
+	void *buffer = NULL;
+
+	if (size == 0)
+		return NULL;
+
+	/* do not attempt kmalloc if we need more than 16 pages at once */
+	if (size <= (16*PAGE_SIZE))
+		buffer = kmalloc(size, flags | GFP_NOIO | __GFP_NOWARN);
+	if (!buffer) {
+		if (flags & __GFP_ZERO)
+			buffer = vzalloc(size);
+		else
+			buffer = vmalloc(size);
+	}
+	return buffer;
+}
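
Buffers returned by __aa_kvmalloc() may come from either allocator, so they
cannot be released with a plain kfree().  A minimal sketch of the matching
free; the core kernel's kvfree() implements this same test:

	static void kvfree_sketch(void *buffer)
	{
		if (is_vmalloc_addr(buffer))
			vfree(buffer);
		else
			kfree(buffer);
	}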
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
new file mode 100644
index 0000000..dec607c
--- /dev/null
+++ b/security/apparmor/lsm.c
@@ -0,0 +1,926 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor LSM hooks.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/lsm_hooks.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/ptrace.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/audit.h>
+#include <linux/user_namespace.h>
+#include <net/sock.h>
+
+#include "include/apparmor.h"
+#include "include/apparmorfs.h"
+#include "include/audit.h"
+#include "include/capability.h"
+#include "include/context.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/procattr.h"
+
+/* Flag indicating whether initialization completed */
+int apparmor_initialized __initdata;
+
+/*
+ * LSM hook functions
+ */
+
+/*
+ * free the associated aa_task_cxt and put its profiles
+ */
+static void apparmor_cred_free(struct cred *cred)
+{
+	aa_free_task_context(cred_cxt(cred));
+	cred_cxt(cred) = NULL;
+}
+
+/*
+ * allocate the apparmor part of blank credentials
+ */
+static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+	/* freed by apparmor_cred_free */
+	struct aa_task_cxt *cxt = aa_alloc_task_context(gfp);
+	if (!cxt)
+		return -ENOMEM;
+
+	cred_cxt(cred) = cxt;
+	return 0;
+}
+
+/*
+ * prepare new aa_task_cxt for modification by prepare_cred block
+ */
+static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
+				 gfp_t gfp)
+{
+	/* freed by apparmor_cred_free */
+	struct aa_task_cxt *cxt = aa_alloc_task_context(gfp);
+	if (!cxt)
+		return -ENOMEM;
+
+	aa_dup_task_context(cxt, cred_cxt(old));
+	cred_cxt(new) = cxt;
+	return 0;
+}
+
+/*
+ * transfer the apparmor data to a blank set of creds
+ */
+static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
+{
+	const struct aa_task_cxt *old_cxt = cred_cxt(old);
+	struct aa_task_cxt *new_cxt = cred_cxt(new);
+
+	aa_dup_task_context(new_cxt, old_cxt);
+}
+
+static int apparmor_ptrace_access_check(struct task_struct *child,
+					unsigned int mode)
+{
+	return aa_ptrace(current, child, mode);
+}
+
+static int apparmor_ptrace_traceme(struct task_struct *parent)
+{
+	return aa_ptrace(parent, current, PTRACE_MODE_ATTACH);
+}
+
+/* Derived from security/commoncap.c:cap_capget */
+static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective,
+			   kernel_cap_t *inheritable, kernel_cap_t *permitted)
+{
+	struct aa_profile *profile;
+	const struct cred *cred;
+
+	rcu_read_lock();
+	cred = __task_cred(target);
+	profile = aa_cred_profile(cred);
+
+	/*
+	 * cap_capget is stacked ahead of this and will
+	 * initialize effective and permitted.
+	 */
+	if (!unconfined(profile) && !COMPLAIN_MODE(profile)) {
+		*effective = cap_intersect(*effective, profile->caps.allow);
+		*permitted = cap_intersect(*permitted, profile->caps.allow);
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
+			    int cap, int audit)
+{
+	struct aa_profile *profile;
+	int error = 0;
+
+	profile = aa_cred_profile(cred);
+	if (!unconfined(profile))
+		error = aa_capable(profile, cap, audit);
+	return error;
+}
+
+/**
+ * common_perm - basic common permission check wrapper fn for paths
+ * @op: operation being checked
+ * @path: path to check permission of  (NOT NULL)
+ * @mask: requested permissions mask
+ * @cond: conditional info for the permission request  (NOT NULL)
+ *
+ * Returns: %0 on success, or an error code on denial or other error
+ */
+static int common_perm(int op, struct path *path, u32 mask,
+		       struct path_cond *cond)
+{
+	struct aa_profile *profile;
+	int error = 0;
+
+	profile = __aa_current_profile();
+	if (!unconfined(profile))
+		error = aa_path_perm(op, profile, path, 0, mask, cond);
+
+	return error;
+}
+
+/**
+ * common_perm_dir_dentry - common permission wrapper when path is dir, dentry
+ * @op: operation being checked
+ * @dir: directory of the dentry  (NOT NULL)
+ * @dentry: dentry to check  (NOT NULL)
+ * @mask: requested permissions mask
+ * @cond: conditional info for the permission request  (NOT NULL)
+ *
+ * Returns: %0 on success, or an error code on denial or other error
+ */
+static int common_perm_dir_dentry(int op, struct path *dir,
+				  struct dentry *dentry, u32 mask,
+				  struct path_cond *cond)
+{
+	struct path path = { dir->mnt, dentry };
+
+	return common_perm(op, &path, mask, cond);
+}
+
+/**
+ * common_perm_mnt_dentry - common permission wrapper when mnt, dentry
+ * @op: operation being checked
+ * @mnt: mount point of dentry (NOT NULL)
+ * @dentry: dentry to check  (NOT NULL)
+ * @mask: requested permissions mask
+ *
+ * Returns: %0 on success, or an error code on denial or other error
+ */
+static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
+				  struct dentry *dentry, u32 mask)
+{
+	struct path path = { mnt, dentry };
+	struct path_cond cond = { d_backing_inode(dentry)->i_uid,
+				  d_backing_inode(dentry)->i_mode
+	};
+
+	return common_perm(op, &path, mask, &cond);
+}
+
+/**
+ * common_perm_rm - common permission wrapper for operations doing rm
+ * @op: operation being checked
+ * @dir: directory that the dentry is in  (NOT NULL)
+ * @dentry: dentry being rm'd  (NOT NULL)
+ * @mask: requested permission mask
+ *
+ * Returns: %0 on success, or an error code on denial or other error
+ */
+static int common_perm_rm(int op, struct path *dir,
+			  struct dentry *dentry, u32 mask)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	struct path_cond cond = { };
+
+	if (!inode || !dir->mnt || !mediated_filesystem(dentry))
+		return 0;
+
+	cond.uid = inode->i_uid;
+	cond.mode = inode->i_mode;
+
+	return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
+}
+
+/**
+ * common_perm_create - common permission wrapper for operations doing create
+ * @op: operation being checked
+ * @dir: directory that dentry will be created in  (NOT NULL)
+ * @dentry: dentry to create   (NOT NULL)
+ * @mask: request permission mask
+ * @mode: created file mode
+ *
+ * Returns: %0 on success, or an error code on denial or other error
+ */
+static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
+			      u32 mask, umode_t mode)
+{
+	struct path_cond cond = { current_fsuid(), mode };
+
+	if (!dir->mnt || !mediated_filesystem(dir->dentry))
+		return 0;
+
+	return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
+}
+
+static int apparmor_path_unlink(struct path *dir, struct dentry *dentry)
+{
+	return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
+}
+
+static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry,
+			       umode_t mode)
+{
+	return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
+				  S_IFDIR);
+}
+
+static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry)
+{
+	return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE);
+}
+
+static int apparmor_path_mknod(struct path *dir, struct dentry *dentry,
+			       umode_t mode, unsigned int dev)
+{
+	return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
+}
+
+static int apparmor_path_truncate(struct path *path)
+{
+	struct path_cond cond = { d_backing_inode(path->dentry)->i_uid,
+				  d_backing_inode(path->dentry)->i_mode
+	};
+
+	if (!path->mnt || !mediated_filesystem(path->dentry))
+		return 0;
+
+	return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
+			   &cond);
+}
+
+static int apparmor_path_symlink(struct path *dir, struct dentry *dentry,
+				 const char *old_name)
+{
+	return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE,
+				  S_IFLNK);
+}
+
+static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
+			      struct dentry *new_dentry)
+{
+	struct aa_profile *profile;
+	int error = 0;
+
+	if (!mediated_filesystem(old_dentry))
+		return 0;
+
+	profile = aa_current_profile();
+	if (!unconfined(profile))
+		error = aa_path_link(profile, old_dentry, new_dir, new_dentry);
+	return error;
+}
+
+static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
+				struct path *new_dir, struct dentry *new_dentry)
+{
+	struct aa_profile *profile;
+	int error = 0;
+
+	if (!mediated_filesystem(old_dentry))
+		return 0;
+
+	profile = aa_current_profile();
+	if (!unconfined(profile)) {
+		struct path old_path = { old_dir->mnt, old_dentry };
+		struct path new_path = { new_dir->mnt, new_dentry };
+		struct path_cond cond = { d_backing_inode(old_dentry)->i_uid,
+					  d_backing_inode(old_dentry)->i_mode
+		};
+
+		error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0,
+				     MAY_READ | AA_MAY_META_READ | MAY_WRITE |
+				     AA_MAY_META_WRITE | AA_MAY_DELETE,
+				     &cond);
+		if (!error)
+			error = aa_path_perm(OP_RENAME_DEST, profile, &new_path,
+					     0, MAY_WRITE | AA_MAY_META_WRITE |
+					     AA_MAY_CREATE, &cond);
+
+	}
+	return error;
+}
+
+static int apparmor_path_chmod(struct path *path, umode_t mode)
+{
+	if (!mediated_filesystem(path->dentry))
+		return 0;
+
+	return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
+}
+
+static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+{
+	struct path_cond cond =  { d_backing_inode(path->dentry)->i_uid,
+				   d_backing_inode(path->dentry)->i_mode
+	};
+
+	if (!mediated_filesystem(path->dentry))
+		return 0;
+
+	return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
+}
+
+static int apparmor_inode_getattr(const struct path *path)
+{
+	if (!mediated_filesystem(path->dentry))
+		return 0;
+
+	return common_perm_mnt_dentry(OP_GETATTR, path->mnt, path->dentry,
+				      AA_MAY_META_READ);
+}
+
+static int apparmor_file_open(struct file *file, const struct cred *cred)
+{
+	struct aa_file_cxt *fcxt = file->f_security;
+	struct aa_profile *profile;
+	int error = 0;
+
+	if (!mediated_filesystem(file->f_path.dentry))
+		return 0;
+
+	/* If in exec, permission is handled by bprm hooks.
+	 * Cache permissions granted by the previous exec check, with
+	 * implicit read and executable mmap which are required to
+	 * actually execute the image.
+	 */
+	if (current->in_execve) {
+		fcxt->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
+		return 0;
+	}
+
+	profile = aa_cred_profile(cred);
+	if (!unconfined(profile)) {
+		struct inode *inode = file_inode(file);
+		struct path_cond cond = { inode->i_uid, inode->i_mode };
+
+		error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
+				     aa_map_file_to_perms(file), &cond);
+		/* todo cache full allowed permissions set and state */
+		fcxt->allow = aa_map_file_to_perms(file);
+	}
+
+	return error;
+}
+
+static int apparmor_file_alloc_security(struct file *file)
+{
+	/* freed by apparmor_file_free_security */
+	file->f_security = aa_alloc_file_context(GFP_KERNEL);
+	if (!file->f_security)
+		return -ENOMEM;
+	return 0;
+
+}
+
+static void apparmor_file_free_security(struct file *file)
+{
+	struct aa_file_cxt *cxt = file->f_security;
+
+	aa_free_file_context(cxt);
+}
+
+static int common_file_perm(int op, struct file *file, u32 mask)
+{
+	struct aa_file_cxt *fcxt = file->f_security;
+	struct aa_profile *profile, *fprofile = aa_cred_profile(file->f_cred);
+	int error = 0;
+
+	BUG_ON(!fprofile);
+
+	if (!file->f_path.mnt ||
+	    !mediated_filesystem(file->f_path.dentry))
+		return 0;
+
+	profile = __aa_current_profile();
+
+	/* revalidate access if the cached cred doesn't match the current
+	 * task's or if the request is for more permissions than were
+	 * granted.
+	 *
+	 * Note: the test for !unconfined(fprofile) is to handle file
+	 *       delegation from unconfined tasks
+	 */
+	if (!unconfined(profile) && !unconfined(fprofile) &&
+	    ((fprofile != profile) || (mask & ~fcxt->allow)))
+		error = aa_file_perm(op, profile, file, mask);
+
+	return error;
+}
+
+static int apparmor_file_permission(struct file *file, int mask)
+{
+	return common_file_perm(OP_FPERM, file, mask);
+}
+
+static int apparmor_file_lock(struct file *file, unsigned int cmd)
+{
+	u32 mask = AA_MAY_LOCK;
+
+	if (cmd == F_WRLCK)
+		mask |= MAY_WRITE;
+
+	return common_file_perm(OP_FLOCK, file, mask);
+}
+
+static int common_mmap(int op, struct file *file, unsigned long prot,
+		       unsigned long flags)
+{
+	int mask = 0;
+
+	if (!file || !file->f_security)
+		return 0;
+
+	if (prot & PROT_READ)
+		mask |= MAY_READ;
+	/*
+	 * Private mappings don't require write perms since they don't
+	 * write back to the files
+	 */
+	if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE))
+		mask |= MAY_WRITE;
+	if (prot & PROT_EXEC)
+		mask |= AA_EXEC_MMAP;
+
+	return common_file_perm(op, file, mask);
+}
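
/*
 * e.g. (an illustrative sketch): mmap(..., PROT_READ | PROT_WRITE,
 * MAP_PRIVATE, ...) only needs MAY_READ here, since writes land in the
 * task's private copy, while the same protections with MAP_SHARED require
 * MAY_READ | MAY_WRITE on the backing file.
 */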
+
+static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
+			      unsigned long prot, unsigned long flags)
+{
+	return common_mmap(OP_FMMAP, file, prot, flags);
+}
+
+static int apparmor_file_mprotect(struct vm_area_struct *vma,
+				  unsigned long reqprot, unsigned long prot)
+{
+	return common_mmap(OP_FMPROT, vma->vm_file, prot,
+			   !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
+}
+
+static int apparmor_getprocattr(struct task_struct *task, char *name,
+				char **value)
+{
+	int error = -ENOENT;
+	/* released below */
+	const struct cred *cred = get_task_cred(task);
+	struct aa_task_cxt *cxt = cred_cxt(cred);
+	struct aa_profile *profile = NULL;
+
+	if (strcmp(name, "current") == 0)
+		profile = aa_get_newest_profile(cxt->profile);
+	else if (strcmp(name, "prev") == 0  && cxt->previous)
+		profile = aa_get_newest_profile(cxt->previous);
+	else if (strcmp(name, "exec") == 0 && cxt->onexec)
+		profile = aa_get_newest_profile(cxt->onexec);
+	else
+		error = -EINVAL;
+
+	if (profile)
+		error = aa_getprocattr(profile, value);
+
+	aa_put_profile(profile);
+	put_cred(cred);
+
+	return error;
+}
+
+static int apparmor_setprocattr(struct task_struct *task, char *name,
+				void *value, size_t size)
+{
+	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
+	char *command, *args = value;
+	size_t arg_size;
+	int error;
+
+	if (size == 0)
+		return -EINVAL;
+	/* args points to a PAGE_SIZE buffer; AppArmor requires that
+	 * the buffer be null terminated or have size <= PAGE_SIZE - 1
+	 * so that AppArmor can null terminate it
+	 */
+	if (args[size - 1] != '\0') {
+		if (size == PAGE_SIZE)
+			return -EINVAL;
+		args[size] = '\0';
+	}
+
+	/* task can only write its own attributes */
+	if (current != task)
+		return -EACCES;
+
+	args = value;
+	args = strim(args);
+	command = strsep(&args, " ");
+	if (!args)
+		return -EINVAL;
+	args = skip_spaces(args);
+	if (!*args)
+		return -EINVAL;
+
+	arg_size = size - (args - (char *) value);
+	if (strcmp(name, "current") == 0) {
+		if (strcmp(command, "changehat") == 0) {
+			error = aa_setprocattr_changehat(args, arg_size,
+							 !AA_DO_TEST);
+		} else if (strcmp(command, "permhat") == 0) {
+			error = aa_setprocattr_changehat(args, arg_size,
+							 AA_DO_TEST);
+		} else if (strcmp(command, "changeprofile") == 0) {
+			error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
+							     !AA_DO_TEST);
+		} else if (strcmp(command, "permprofile") == 0) {
+			error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
+							     AA_DO_TEST);
+		} else
+			goto fail;
+	} else if (strcmp(name, "exec") == 0) {
+		if (strcmp(command, "exec") == 0)
+			error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
+							     !AA_DO_TEST);
+		else
+			goto fail;
+	} else
+		/* only support the "current" and "exec" process attributes */
+		return -EINVAL;
+
+	if (!error)
+		error = size;
+	return error;
+
+fail:
+	sa.type = LSM_AUDIT_DATA_NONE;
+	sa.aad = &aad;
+	aad.profile = aa_current_profile();
+	aad.op = OP_SETPROCATTR;
+	aad.info = name;
+	aad.error = -EINVAL;
+	aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
+	return -EINVAL;
+}
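
/*
 * Userspace view of the command parsing above (a sketch; the profile name
 * is hypothetical and must already be loaded):
 *
 *	int fd = open("/proc/self/attr/current", O_WRONLY);
 *	write(fd, "changeprofile /usr/bin/foo", 26);
 *	close(fd);
 */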
+
+static int apparmor_task_setrlimit(struct task_struct *task,
+		unsigned int resource, struct rlimit *new_rlim)
+{
+	struct aa_profile *profile = __aa_current_profile();
+	int error = 0;
+
+	if (!unconfined(profile))
+		error = aa_task_setrlimit(profile, task, resource, new_rlim);
+
+	return error;
+}
+
+static struct security_hook_list apparmor_hooks[] = {
+	LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
+	LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
+	LSM_HOOK_INIT(capget, apparmor_capget),
+	LSM_HOOK_INIT(capable, apparmor_capable),
+
+	LSM_HOOK_INIT(path_link, apparmor_path_link),
+	LSM_HOOK_INIT(path_unlink, apparmor_path_unlink),
+	LSM_HOOK_INIT(path_symlink, apparmor_path_symlink),
+	LSM_HOOK_INIT(path_mkdir, apparmor_path_mkdir),
+	LSM_HOOK_INIT(path_rmdir, apparmor_path_rmdir),
+	LSM_HOOK_INIT(path_mknod, apparmor_path_mknod),
+	LSM_HOOK_INIT(path_rename, apparmor_path_rename),
+	LSM_HOOK_INIT(path_chmod, apparmor_path_chmod),
+	LSM_HOOK_INIT(path_chown, apparmor_path_chown),
+	LSM_HOOK_INIT(path_truncate, apparmor_path_truncate),
+	LSM_HOOK_INIT(inode_getattr, apparmor_inode_getattr),
+
+	LSM_HOOK_INIT(file_open, apparmor_file_open),
+	LSM_HOOK_INIT(file_permission, apparmor_file_permission),
+	LSM_HOOK_INIT(file_alloc_security, apparmor_file_alloc_security),
+	LSM_HOOK_INIT(file_free_security, apparmor_file_free_security),
+	LSM_HOOK_INIT(mmap_file, apparmor_mmap_file),
+	LSM_HOOK_INIT(file_mprotect, apparmor_file_mprotect),
+	LSM_HOOK_INIT(file_lock, apparmor_file_lock),
+
+	LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
+	LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
+
+	LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
+	LSM_HOOK_INIT(cred_free, apparmor_cred_free),
+	LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
+	LSM_HOOK_INIT(cred_transfer, apparmor_cred_transfer),
+
+	LSM_HOOK_INIT(bprm_set_creds, apparmor_bprm_set_creds),
+	LSM_HOOK_INIT(bprm_committing_creds, apparmor_bprm_committing_creds),
+	LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds),
+	LSM_HOOK_INIT(bprm_secureexec, apparmor_bprm_secureexec),
+
+	LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit),
+};
+
+/*
+ * AppArmor sysfs module parameters
+ */
+
+static int param_set_aabool(const char *val, const struct kernel_param *kp);
+static int param_get_aabool(char *buffer, const struct kernel_param *kp);
+#define param_check_aabool param_check_bool
+static const struct kernel_param_ops param_ops_aabool = {
+	.flags = KERNEL_PARAM_OPS_FL_NOARG,
+	.set = param_set_aabool,
+	.get = param_get_aabool
+};
+
+static int param_set_aauint(const char *val, const struct kernel_param *kp);
+static int param_get_aauint(char *buffer, const struct kernel_param *kp);
+#define param_check_aauint param_check_uint
+static const struct kernel_param_ops param_ops_aauint = {
+	.set = param_set_aauint,
+	.get = param_get_aauint
+};
+
+static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
+static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
+#define param_check_aalockpolicy param_check_bool
+static const struct kernel_param_ops param_ops_aalockpolicy = {
+	.flags = KERNEL_PARAM_OPS_FL_NOARG,
+	.set = param_set_aalockpolicy,
+	.get = param_get_aalockpolicy
+};
+
+static int param_set_audit(const char *val, struct kernel_param *kp);
+static int param_get_audit(char *buffer, struct kernel_param *kp);
+
+static int param_set_mode(const char *val, struct kernel_param *kp);
+static int param_get_mode(char *buffer, struct kernel_param *kp);
+
+/* Flag values, also controllable via /sys/module/apparmor/parameters
+ * We define special types as we want to do additional mediation.
+ */
+
+/* AppArmor global enforcement switch - complain, enforce, kill */
+enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE;
+module_param_call(mode, param_set_mode, param_get_mode,
+		  &aa_g_profile_mode, S_IRUSR | S_IWUSR);
+
+/* Debug mode */
+bool aa_g_debug;
+module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
+
+/* Audit mode */
+enum audit_mode aa_g_audit;
+module_param_call(audit, param_set_audit, param_get_audit,
+		  &aa_g_audit, S_IRUSR | S_IWUSR);
+
+/* Determines if audit header is included in audited messages.  This
+ * provides more context if the audit daemon is not running
+ */
+bool aa_g_audit_header = 1;
+module_param_named(audit_header, aa_g_audit_header, aabool,
+		   S_IRUSR | S_IWUSR);
+
+/* lock out loading/removal of policy
+ * TODO: add in at boot loading of policy, which is the only way to
+ *       load policy, if lock_policy is set
+ */
+bool aa_g_lock_policy;
+module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy,
+		   S_IRUSR | S_IWUSR);
+
+/* Syscall logging mode */
+bool aa_g_logsyscall;
+module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
+
+/* Maximum pathname length before accesses will start getting rejected */
+unsigned int aa_g_path_max = 2 * PATH_MAX;
+module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR);
+
+/* Determines how paranoid policy loading is and how much verification
+ * is done on the loaded policy.
+ */
+bool aa_g_paranoid_load = 1;
+module_param_named(paranoid_load, aa_g_paranoid_load, aabool,
+		   S_IRUSR | S_IWUSR);
+
+/* Boot time disable flag */
+static bool apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE;
+module_param_named(enabled, apparmor_enabled, bool, S_IRUGO);
+
+static int __init apparmor_enabled_setup(char *str)
+{
+	unsigned long enabled;
+	int error = kstrtoul(str, 0, &enabled);
+	if (!error)
+		apparmor_enabled = enabled ? 1 : 0;
+	return 1;
+}
+
+__setup("apparmor=", apparmor_enabled_setup);
+
+/* set global flag turning off the ability to load policy */
+static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
+{
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+	if (aa_g_lock_policy)
+		return -EACCES;
+	return param_set_bool(val, kp);
+}
+
+static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
+{
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+	return param_get_bool(buffer, kp);
+}
+
+static int param_set_aabool(const char *val, const struct kernel_param *kp)
+{
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+	return param_set_bool(val, kp);
+}
+
+static int param_get_aabool(char *buffer, const struct kernel_param *kp)
+{
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+	return param_get_bool(buffer, kp);
+}
+
+static int param_set_aauint(const char *val, const struct kernel_param *kp)
+{
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+	return param_set_uint(val, kp);
+}
+
+static int param_get_aauint(char *buffer, const struct kernel_param *kp)
+{
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+	return param_get_uint(buffer, kp);
+}
+
+static int param_get_audit(char *buffer, struct kernel_param *kp)
+{
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (!apparmor_enabled)
+		return -EINVAL;
+
+	return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
+}
+
+static int param_set_audit(const char *val, struct kernel_param *kp)
+{
+	int i;
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (!apparmor_enabled)
+		return -EINVAL;
+
+	if (!val)
+		return -EINVAL;
+
+	for (i = 0; i < AUDIT_MAX_INDEX; i++) {
+		if (strcmp(val, audit_mode_names[i]) == 0) {
+			aa_g_audit = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int param_get_mode(char *buffer, struct kernel_param *kp)
+{
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (!apparmor_enabled)
+		return -EINVAL;
+
+	return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
+}
+
+static int param_set_mode(const char *val, struct kernel_param *kp)
+{
+	int i;
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (!apparmor_enabled)
+		return -EINVAL;
+
+	if (!val)
+		return -EINVAL;
+
+	for (i = 0; i < APPARMOR_MODE_NAMES_MAX_INDEX; i++) {
+		if (strcmp(val, aa_profile_mode_names[i]) == 0) {
+			aa_g_profile_mode = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
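+
+/*
+ * Minimal usage sketch (assumes the standard module-param sysfs
+ * layout): with the hooks above a task holding CAP_MAC_ADMIN can
+ * switch the global mode at runtime, e.g.
+ *
+ *	echo -n complain > /sys/module/apparmor/parameters/mode
+ *
+ * param_set_mode() then maps the string to its index in
+ * aa_profile_mode_names[].
+ */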
+
+/*
+ * AppArmor init functions
+ */
+
+/**
+ * set_init_cxt - set a task context and profile on the first task.
+ *
+ * TODO: allow setting an alternate profile other than unconfined
+ */
+static int __init set_init_cxt(void)
+{
+	struct cred *cred = (struct cred *)current->real_cred;
+	struct aa_task_cxt *cxt;
+
+	cxt = aa_alloc_task_context(GFP_KERNEL);
+	if (!cxt)
+		return -ENOMEM;
+
+	cxt->profile = aa_get_profile(root_ns->unconfined);
+	cred_cxt(cred) = cxt;
+
+	return 0;
+}
+
+static int __init apparmor_init(void)
+{
+	int error;
+
+	if (!apparmor_enabled || !security_module_enable("apparmor")) {
+		aa_info_message("AppArmor disabled by boot time parameter");
+		apparmor_enabled = 0;
+		return 0;
+	}
+
+	error = aa_alloc_root_ns();
+	if (error) {
+		AA_ERROR("Unable to allocate default profile namespace\n");
+		goto alloc_out;
+	}
+
+	error = set_init_cxt();
+	if (error) {
+		AA_ERROR("Failed to set context on init task\n");
+		aa_free_root_ns();
+		goto alloc_out;
+	}
+	security_add_hooks(apparmor_hooks, ARRAY_SIZE(apparmor_hooks));
+
+	/* Report that AppArmor successfully initialized */
+	apparmor_initialized = 1;
+	if (aa_g_profile_mode == APPARMOR_COMPLAIN)
+		aa_info_message("AppArmor initialized: complain mode enabled");
+	else if (aa_g_profile_mode == APPARMOR_KILL)
+		aa_info_message("AppArmor initialized: kill mode enabled");
+	else
+		aa_info_message("AppArmor initialized");
+
+	return error;
+
+alloc_out:
+	aa_destroy_aafs();
+
+	apparmor_enabled = 0;
+	return error;
+}
+
+security_initcall(apparmor_init);
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
new file mode 100644
index 0000000..727eb42
--- /dev/null
+++ b/security/apparmor/match.c
@@ -0,0 +1,428 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor dfa based regular expression matching engine
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2012 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+
+#include "include/apparmor.h"
+#include "include/match.h"
+
+#define base_idx(X) ((X) & 0xffffff)
+
+/**
+ * unpack_table - unpack a dfa table (one of accept, default, base, next, check)
+ * @blob: data to unpack (NOT NULL)
+ * @bsize: size of blob
+ *
+ * Returns: pointer to table else NULL on failure
+ *
+ * NOTE: must be freed by kvfree (not kfree)
+ */
+static struct table_header *unpack_table(char *blob, size_t bsize)
+{
+	struct table_header *table = NULL;
+	struct table_header th;
+	size_t tsize;
+
+	if (bsize < sizeof(struct table_header))
+		goto out;
+
+	/* loaded td_id's start at 1, subtract 1 now to avoid doing
+	 * it every time we use td_id as an index
+	 */
+	th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1;
+	th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
+	th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
+	blob += sizeof(struct table_header);
+
+	if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
+	      th.td_flags == YYTD_DATA8))
+		goto out;
+
+	tsize = table_size(th.td_lolen, th.td_flags);
+	if (bsize < tsize)
+		goto out;
+
+	table = kvzalloc(tsize);
+	if (table) {
+		*table = th;
+		if (th.td_flags == YYTD_DATA8)
+			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+				     u8, byte_to_byte);
+		else if (th.td_flags == YYTD_DATA16)
+			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+				     u16, be16_to_cpu);
+		else if (th.td_flags == YYTD_DATA32)
+			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+				     u32, be32_to_cpu);
+		else
+			goto fail;
+	}
+
+out:
+	/* if table was vmalloced make sure the page tables are synced
+	 * before it is used, as it goes live to all cpus.
+	 */
+	if (is_vmalloc_addr(table))
+		vm_unmap_aliases();
+	return table;
+fail:
+	kvfree(table);
+	return NULL;
+}
+
+/**
+ * verify_dfa - verify that transitions and states in the tables are in bounds.
+ * @dfa: dfa to test  (NOT NULL)
+ * @flags: flags controlling what type of accept tables are acceptable
+ *
+ * Assumes dfa has gone through the first pass verification done by unpacking
+ * NOTE: this does not validate accept table values
+ *
+ * Returns: %0 else error code on failure to verify
+ */
+static int verify_dfa(struct aa_dfa *dfa, int flags)
+{
+	size_t i, state_count, trans_count;
+	int error = -EPROTO;
+
+	/* check that required tables exist */
+	if (!(dfa->tables[YYTD_ID_DEF] &&
+	      dfa->tables[YYTD_ID_BASE] &&
+	      dfa->tables[YYTD_ID_NXT] && dfa->tables[YYTD_ID_CHK]))
+		goto out;
+
+	/* accept.size == default.size == base.size */
+	state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
+	if (ACCEPT1_FLAGS(flags)) {
+		if (!dfa->tables[YYTD_ID_ACCEPT])
+			goto out;
+		if (state_count != dfa->tables[YYTD_ID_ACCEPT]->td_lolen)
+			goto out;
+	}
+	if (ACCEPT2_FLAGS(flags)) {
+		if (!dfa->tables[YYTD_ID_ACCEPT2])
+			goto out;
+		if (state_count != dfa->tables[YYTD_ID_ACCEPT2]->td_lolen)
+			goto out;
+	}
+	if (state_count != dfa->tables[YYTD_ID_DEF]->td_lolen)
+		goto out;
+
+	/* next.size == chk.size */
+	trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
+	if (trans_count != dfa->tables[YYTD_ID_CHK]->td_lolen)
+		goto out;
+
+	/* if equivalence classes then its table size must be 256 */
+	if (dfa->tables[YYTD_ID_EC] &&
+	    dfa->tables[YYTD_ID_EC]->td_lolen != 256)
+		goto out;
+
+	if (flags & DFA_FLAG_VERIFY_STATES) {
+		for (i = 0; i < state_count; i++) {
+			if (DEFAULT_TABLE(dfa)[i] >= state_count)
+				goto out;
+			if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) {
+				printk(KERN_ERR "AppArmor DFA next/check upper "
+				       "bounds error\n");
+				goto out;
+			}
+		}
+
+		for (i = 0; i < trans_count; i++) {
+			if (NEXT_TABLE(dfa)[i] >= state_count)
+				goto out;
+			if (CHECK_TABLE(dfa)[i] >= state_count)
+				goto out;
+		}
+	}
+
+	error = 0;
+out:
+	return error;
+}
+
+/**
+ * dfa_free - free a dfa allocated by aa_dfa_unpack
+ * @dfa: the dfa to free  (MAYBE NULL)
+ *
+ * Requires: reference count to dfa == 0
+ */
+static void dfa_free(struct aa_dfa *dfa)
+{
+	if (dfa) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(dfa->tables); i++) {
+			kvfree(dfa->tables[i]);
+			dfa->tables[i] = NULL;
+		}
+		kfree(dfa);
+	}
+}
+
+/**
+ * aa_dfa_free_kref - free aa_dfa by kref (called by aa_put_dfa)
+ * @kref: kref callback for freeing of a dfa  (NOT NULL)
+ */
+void aa_dfa_free_kref(struct kref *kref)
+{
+	struct aa_dfa *dfa = container_of(kref, struct aa_dfa, count);
+	dfa_free(dfa);
+}
+
+/**
+ * aa_dfa_unpack - unpack the binary tables of a serialized dfa
+ * @blob: aligned serialized stream of data to unpack  (NOT NULL)
+ * @size: size of data to unpack
+ * @flags: flags controlling what type of accept tables are acceptable
+ *
+ * Unpack a dfa that has been serialized.  To find information on the dfa
+ * format look in Documentation/security/apparmor.txt
+ * Assumes the dfa @blob stream has been aligned on an 8 byte boundary
+ *
+ * Returns: an unpacked dfa ready for matching or ERR_PTR on failure
+ */
+struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags)
+{
+	int hsize;
+	int error = -ENOMEM;
+	char *data = blob;
+	struct table_header *table = NULL;
+	struct aa_dfa *dfa = kzalloc(sizeof(struct aa_dfa), GFP_KERNEL);
+	if (!dfa)
+		goto fail;
+
+	kref_init(&dfa->count);
+
+	error = -EPROTO;
+
+	/* get dfa table set header */
+	if (size < sizeof(struct table_set_header))
+		goto fail;
+
+	if (ntohl(*(u32 *) data) != YYTH_MAGIC)
+		goto fail;
+
+	hsize = ntohl(*(u32 *) (data + 4));
+	if (size < hsize)
+		goto fail;
+
+	dfa->flags = ntohs(*(u16 *) (data + 12));
+	data += hsize;
+	size -= hsize;
+
+	while (size > 0) {
+		table = unpack_table(data, size);
+		if (!table)
+			goto fail;
+
+		switch (table->td_id) {
+		case YYTD_ID_ACCEPT:
+			if (!(table->td_flags & ACCEPT1_FLAGS(flags)))
+				goto fail;
+			break;
+		case YYTD_ID_ACCEPT2:
+			if (!(table->td_flags & ACCEPT2_FLAGS(flags)))
+				goto fail;
+			break;
+		case YYTD_ID_BASE:
+			if (table->td_flags != YYTD_DATA32)
+				goto fail;
+			break;
+		case YYTD_ID_DEF:
+		case YYTD_ID_NXT:
+		case YYTD_ID_CHK:
+			if (table->td_flags != YYTD_DATA16)
+				goto fail;
+			break;
+		case YYTD_ID_EC:
+			if (table->td_flags != YYTD_DATA8)
+				goto fail;
+			break;
+		default:
+			goto fail;
+		}
+		/* check for duplicate table entry */
+		if (dfa->tables[table->td_id])
+			goto fail;
+		dfa->tables[table->td_id] = table;
+		data += table_size(table->td_lolen, table->td_flags);
+		size -= table_size(table->td_lolen, table->td_flags);
+		table = NULL;
+	}
+
+	error = verify_dfa(dfa, flags);
+	if (error)
+		goto fail;
+
+	return dfa;
+
+fail:
+	kvfree(table);
+	dfa_free(dfa);
+	return ERR_PTR(error);
+}
+
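+/*
+ * The matchers below all take single steps through the comb-compressed
+ * transition tables: for state S and input byte c (optionally remapped
+ * through the equivalence table), the candidate slot is base[S] + c,
+ * and next[slot] is used only when check[slot] confirms the slot
+ * belongs to S; otherwise the state's default transition is taken.
+ * A minimal sketch of one step (hypothetical helper, not used below):
+ */
+static inline unsigned int dfa_step_sketch(struct aa_dfa *dfa,
+					   unsigned int state, u8 c)
+{
+	unsigned int pos = base_idx(BASE_TABLE(dfa)[state]) + c;
+
+	return (CHECK_TABLE(dfa)[pos] == state) ?
+		NEXT_TABLE(dfa)[pos] : DEFAULT_TABLE(dfa)[state];
+}
+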
+/**
+ * aa_dfa_match_len - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against  (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the string of bytes to match against the dfa  (NOT NULL)
+ * @len: length of the string of bytes to match
+ *
+ * aa_dfa_match_len will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * This function will happily match against the 0 byte and only finishes
+ * when @len input is consumed.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
+			      const char *str, int len)
+{
+	u16 *def = DEFAULT_TABLE(dfa);
+	u32 *base = BASE_TABLE(dfa);
+	u16 *next = NEXT_TABLE(dfa);
+	u16 *check = CHECK_TABLE(dfa);
+	unsigned int state = start, pos;
+
+	if (state == 0)
+		return 0;
+
+	/* current state is <state>, matching character *str */
+	if (dfa->tables[YYTD_ID_EC]) {
+		/* Equivalence class table defined */
+		u8 *equiv = EQUIV_TABLE(dfa);
+		/* default is direct to next state */
+		for (; len; len--) {
+			pos = base_idx(base[state]) + equiv[(u8) *str++];
+			if (check[pos] == state)
+				state = next[pos];
+			else
+				state = def[state];
+		}
+	} else {
+		/* default is direct to next state */
+		for (; len; len--) {
+			pos = base_idx(base[state]) + (u8) *str++;
+			if (check[pos] == state)
+				state = next[pos];
+			else
+				state = def[state];
+		}
+	}
+
+	return state;
+}
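+
+/*
+ * Usage sketch: because the returned state can seed a continuing
+ * match, a name can be matched in pieces, e.g. (assuming the
+ * conventional DFA_START start state from include/match.h):
+ *
+ *	state = aa_dfa_match_len(dfa, DFA_START, "/usr", 4);
+ *	state = aa_dfa_match_len(dfa, state, "/bin", 4);
+ *
+ * reaches the same state as matching "/usr/bin" in a single call.
+ */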
+
+/**
+ * aa_dfa_match - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against  (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ *
+ * aa_dfa_match will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
+			  const char *str)
+{
+	u16 *def = DEFAULT_TABLE(dfa);
+	u32 *base = BASE_TABLE(dfa);
+	u16 *next = NEXT_TABLE(dfa);
+	u16 *check = CHECK_TABLE(dfa);
+	unsigned int state = start, pos;
+
+	if (state == 0)
+		return 0;
+
+	/* current state is <state>, matching character *str */
+	if (dfa->tables[YYTD_ID_EC]) {
+		/* Equivalence class table defined */
+		u8 *equiv = EQUIV_TABLE(dfa);
+		/* default is direct to next state */
+		while (*str) {
+			pos = base_idx(base[state]) + equiv[(u8) *str++];
+			if (check[pos] == state)
+				state = next[pos];
+			else
+				state = def[state];
+		}
+	} else {
+		/* default is direct to next state */
+		while (*str) {
+			pos = base_idx(base[state]) + (u8) *str++;
+			if (check[pos] == state)
+				state = next[pos];
+			else
+				state = def[state];
+		}
+	}
+
+	return state;
+}
+
+/**
+ * aa_dfa_next - step one character to the next state in the dfa
+ * @dfa: the dfa to traverse (NOT NULL)
+ * @state: the state to start in
+ * @c: the input character to transition on
+ *
+ * aa_dfa_next will step through the dfa by one input character @c
+ *
+ * Returns: state reached after input @c
+ */
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+			  const char c)
+{
+	u16 *def = DEFAULT_TABLE(dfa);
+	u32 *base = BASE_TABLE(dfa);
+	u16 *next = NEXT_TABLE(dfa);
+	u16 *check = CHECK_TABLE(dfa);
+	unsigned int pos;
+
+	/* current state is <state>, matching character @c */
+	if (dfa->tables[YYTD_ID_EC]) {
+		/* Equivalence class table defined */
+		u8 *equiv = EQUIV_TABLE(dfa);
+		/* default is direct to next state */
+
+		pos = base_idx(base[state]) + equiv[(u8) c];
+		if (check[pos] == state)
+			state = next[pos];
+		else
+			state = def[state];
+	} else {
+		/* default is direct to next state */
+		pos = base_idx(base[state]) + (u8) c;
+		if (check[pos] == state)
+			state = next[pos];
+		else
+			state = def[state];
+	}
+
+	return state;
+}
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
new file mode 100644
index 0000000..71e0e3a
--- /dev/null
+++ b/security/apparmor/path.c
@@ -0,0 +1,236 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor function for pathnames
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/magic.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/path.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs_struct.h>
+
+#include "include/apparmor.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+
+/* modified from dcache.c */
+static int prepend(char **buffer, int buflen, const char *str, int namelen)
+{
+	buflen -= namelen;
+	if (buflen < 0)
+		return -ENAMETOOLONG;
+	*buffer -= namelen;
+	memcpy(*buffer, str, namelen);
+	return 0;
+}
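+
+/*
+ * prepend() builds names right to left: with *buffer pointing at
+ * "bar" inside @buf, prepend(&buffer, remaining, "foo/", 4) moves
+ * *buffer back four bytes so it points at "foo/bar".
+ */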
+
+#define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT)
+
+/**
+ * d_namespace_path - lookup a name associated with a given path
+ * @path: path to lookup  (NOT NULL)
+ * @buf:  buffer to store path to  (NOT NULL)
+ * @buflen: length of @buf
+ * @name: Returns - pointer for start of path name within @buf (NOT NULL)
+ * @flags: flags controlling path lookup
+ *
+ * Handle path name lookup.
+ *
+ * Returns: %0 else error code if path lookup fails
+ *          When no error the path name is returned in @name which points
+ *          to a position in @buf
+ */
+static int d_namespace_path(struct path *path, char *buf, int buflen,
+			    char **name, int flags)
+{
+	char *res;
+	int error = 0;
+	int connected = 1;
+
+	if (path->mnt->mnt_flags & MNT_INTERNAL) {
+		/* it's not mounted anywhere */
+		res = dentry_path(path->dentry, buf, buflen);
+		*name = res;
+		if (IS_ERR(res)) {
+			*name = buf;
+			return PTR_ERR(res);
+		}
+		if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+		    strncmp(*name, "/sys/", 5) == 0) {
+			/* TODO: convert over to using a per namespace
+			 * control instead of hard coded /proc
+			 */
+			return prepend(name, *name - buf, "/proc", 5);
+		}
+		return 0;
+	}
+
+	/* resolve paths relative to chroot? */
+	if (flags & PATH_CHROOT_REL) {
+		struct path root;
+		get_fs_root(current->fs, &root);
+		res = __d_path(path, &root, buf, buflen);
+		path_put(&root);
+	} else {
+		res = d_absolute_path(path, buf, buflen);
+		if (!our_mnt(path->mnt))
+			connected = 0;
+	}
+
+	/* handle error conditions - and still allow a partial path to
+	 * be returned.
+	 */
+	if (!res || IS_ERR(res)) {
+		if (PTR_ERR(res) == -ENAMETOOLONG)
+			return -ENAMETOOLONG;
+		connected = 0;
+		res = dentry_path_raw(path->dentry, buf, buflen);
+		if (IS_ERR(res)) {
+			error = PTR_ERR(res);
+			*name = buf;
+			goto out;
+		};
+	} else if (!our_mnt(path->mnt))
+		connected = 0;
+
+	*name = res;
+
+	/* Handle two cases:
+	 * 1. A deleted dentry when the profile is not allowing mediation
+	 *    of deleted objects
+	 * 2. On some filesystems, newly allocated dentries appear to the
+	 *    security_path hooks as a deleted dentry except without an inode
+	 *    allocated.
+	 */
+	if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
+	    !(flags & PATH_MEDIATE_DELETED)) {
+		error = -ENOENT;
+		goto out;
+	}
+
+	/* If the path is not connected to the expected root,
+	 * check if it is a sysctl and handle specially else remove any
+	 * leading / that __d_path may have returned.
+	 * Unless
+	 *     specifically directed to connect the path,
+	 * OR
+	 *     if in a chroot and doing chroot relative paths and the path
+	 *     resolves to the namespace root (would be connected outside
+	 *     of chroot) and specifically directed to connect paths to
+	 *     namespace root.
+	 */
+	if (!connected) {
+		if (!(flags & PATH_CONNECT_PATH) &&
+			   !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
+			     our_mnt(path->mnt))) {
+			/* disconnected path, don't return pathname starting
+			 * with '/'
+			 */
+			error = -EACCES;
+			if (*res == '/')
+				*name = res + 1;
+		}
+	}
+
+out:
+	return error;
+}
+
+/**
+ * get_name_to_buffer - get the pathname to a buffer, ensuring a dir / is appended
+ * @path: path to get name for  (NOT NULL)
+ * @flags: flags controlling path lookup
+ * @buffer: buffer to put name in  (NOT NULL)
+ * @size: size of buffer
+ * @name: Returns - contains position of path name in @buffer (NOT NULL)
+ *
+ * Returns: %0 else error on failure
+ */
+static int get_name_to_buffer(struct path *path, int flags, char *buffer,
+			      int size, char **name, const char **info)
+{
+	int adjust = (flags & PATH_IS_DIR) ? 1 : 0;
+	int error = d_namespace_path(path, buffer, size - adjust, name, flags);
+
+	if (!error && (flags & PATH_IS_DIR) && (*name)[1] != '\0')
+		/*
+		 * Append "/" to the pathname.  The root directory is a special
+		 * case; it already ends in slash.
+		 */
+		strcpy(&buffer[size - 2], "/");
+
+	if (info && error) {
+		if (error == -ENOENT)
+			*info = "Failed name lookup - deleted entry";
+		else if (error == -EACCES)
+			*info = "Failed name lookup - disconnected path";
+		else if (error == -ENAMETOOLONG)
+			*info = "Failed name lookup - name too long";
+		else
+			*info = "Failed name lookup";
+	}
+
+	return error;
+}
+
+/**
+ * aa_path_name - compute the pathname of a file
+ * @path: path of the file  (NOT NULL)
+ * @flags: flags controlling path name generation
+ * @buffer: Returns - buffer allocated here holding @name; freed by caller  (NOT NULL)
+ * @name: Returns - the generated path name if !error (NOT NULL)
+ * @info: Returns - information on why the path lookup failed (MAYBE NULL)
+ *
+ * @name is a pointer to the beginning of the pathname (which usually differs
+ * from the beginning of the buffer), or NULL.  If there is an error @name
+ * may contain a partial or invalid name that can be used for audit purposes,
+ * but it can not be used for mediation.
+ *
+ * We need PATH_IS_DIR to indicate whether the file is a directory or not
+ * because the file may not yet exist, and so we cannot check the inode's
+ * file type.
+ *
+ * Returns: %0 else error code if the name could not be retrieved
+ */
+int aa_path_name(struct path *path, int flags, char **buffer, const char **name,
+		 const char **info)
+{
+	char *buf, *str = NULL;
+	int size = 256;
+	int error;
+
+	*name = NULL;
+	*buffer = NULL;
+	for (;;) {
+		/* freed by caller */
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
+		error = get_name_to_buffer(path, flags, buf, size, &str, info);
+		if (error != -ENAMETOOLONG)
+			break;
+
+		kfree(buf);
+		size <<= 1;
+		if (size > aa_g_path_max)
+			return -ENAMETOOLONG;
+		*info = NULL;
+	}
+	*buffer = buf;
+	*name = str;
+
+	return error;
+}
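+
+/*
+ * Typical caller-side pattern (sketch):
+ *
+ *	char *buffer;
+ *	const char *name, *info;
+ *	int error = aa_path_name(path, flags, &buffer, &name, &info);
+ *	if (!error)
+ *		... mediate access using name ...
+ *	kfree(buffer);
+ *
+ * The buffer is allocated here and must be freed by the caller, even
+ * when a partial name was returned for auditing.
+ */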
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
new file mode 100644
index 0000000..705c287
--- /dev/null
+++ b/security/apparmor/policy.c
@@ -0,0 +1,1301 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy manipulation functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ * AppArmor policy is based around profiles, which contain the rules a
+ * task is confined by.  Every task in the system has a profile attached
+ * to it determined either by matching "unconfined" tasks against the
+ * visible set of profiles or by following a profiles attachment rules.
+ *
+ * Each profile exists in a profile namespace which is a container of
+ * visible profiles.  Each namespace contains a special "unconfined" profile,
+ * which doesn't enforce any confinement on a task beyond DAC.
+ *
+ * Namespace and profile names can be written together in either
+ * of two syntaxes.
+ *	:namespace:profile - used by kernel interfaces for easy detection
+ *	namespace://profile - used by policy
+ *
+ * Profile names can not start with : or @ or ^ and may not contain \0
+ *
+ * Reserved profile names
+ *	unconfined - special automatically generated unconfined profile
+ *	inherit - special name to indicate profile inheritance
+ *	null-XXXX-YYYY - special automatically generated learning profiles
+ *
+ * Namespace names may not start with / or @ and may not contain \0 or :
+ * Reserved namespace names
+ *	user-XXXX - user defined profiles
+ *
+ * a // in a profile or namespace name indicates a hierarchical name with the
+ * name before the // being the parent and the name after it the child.
+ *
+ * Profile and namespace hierarchies serve two different but similar purposes.
+ * The namespace contains the set of visible profiles that are considered
+ * for attachment.  The hierarchy of namespaces allows for virtualizing
+ * the namespace so that for example a chroot can have its own set of profiles
+ * which may define some local user namespaces.
+ * The profile hierarchy serves two distinct purposes,
+ * -  it allows for sub profiles or hats, which allows an application to run
+ *    subprograms under its own profile with different restrictions than
+ *    itself, and not have it use the system profile.
+ *    eg. if a mail program starts an editor, the policy might make the
+ *        restrictions on the editor tighter than on the mail program,
+ *        and definitely different than general editor restrictions
+ * - it allows for binary hierarchy of profiles, so that execution history
+ *   is preserved.  This feature isn't exploited by AppArmor reference policy
+ *   but is allowed.  NOTE: this is currently suboptimal because profile
+ *   aliasing is not currently implemented so that a profile for each
+ *   level must be defined.
+ *   eg. /bin/bash///bin/ls as a name would indicate /bin/ls was started
+ *       from /bin/bash
+ *
+ *   A profile or namespace name that can contain one or more // separators
+ *   is referred to as an hname (hierarchical).
+ *   eg.  /bin/bash//bin/ls
+ *
+ *   An fqname is a name that may contain both namespace and profile hnames.
+ *   eg. :ns:/bin/bash//bin/ls
+ *
+ * NOTES:
+ *   - locking of profile lists is currently fairly coarse.  All profile
+ *     lists within a namespace use the namespace lock.
+ * FIXME: move profile lists to using rcu_lists
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include "include/apparmor.h"
+#include "include/capability.h"
+#include "include/context.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/policy_unpack.h"
+#include "include/resource.h"
+
+
+/* root profile namespace */
+struct aa_namespace *root_ns;
+
+const char *const aa_profile_mode_names[] = {
+	"enforce",
+	"complain",
+	"kill",
+	"unconfined",
+};
+
+/**
+ * hname_tail - find the last component of an hname
+ * @hname: hname to find the base profile name component of  (NOT NULL)
+ *
+ * Returns: the tail (base profile name) name component of an hname
+ */
+static const char *hname_tail(const char *hname)
+{
+	char *split;
+	hname = strim((char *)hname);
+	for (split = strstr(hname, "//"); split; split = strstr(hname, "//"))
+		hname = split + 2;
+
+	return hname;
+}
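+
+/*
+ * For example, hname_tail("parent//child//grandchild") returns a
+ * pointer to the trailing "grandchild" component.
+ */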
+
+/**
+ * policy_init - initialize a policy structure
+ * @policy: policy to initialize  (NOT NULL)
+ * @prefix: prefix name if any is required.  (MAYBE NULL)
+ * @name: name of the policy, init will make a copy of it  (NOT NULL)
+ *
+ * Note: this fn creates a copy of strings passed in
+ *
+ * Returns: true if policy init successful
+ */
+static bool policy_init(struct aa_policy *policy, const char *prefix,
+			const char *name)
+{
+	/* freed by policy_free */
+	if (prefix) {
+		policy->hname = kmalloc(strlen(prefix) + strlen(name) + 3,
+					GFP_KERNEL);
+		if (policy->hname)
+			sprintf(policy->hname, "%s//%s", prefix, name);
+	} else
+		policy->hname = kstrdup(name, GFP_KERNEL);
+	if (!policy->hname)
+		return 0;
+	/* base.name is a substring of fqname */
+	policy->name = (char *)hname_tail(policy->hname);
+	INIT_LIST_HEAD(&policy->list);
+	INIT_LIST_HEAD(&policy->profiles);
+
+	return 1;
+}
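+
+/*
+ * For example, policy_init(&p, "parent", "child") sets p.hname to
+ * "parent//child" with p.name pointing at the trailing "child".
+ */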
+
+/**
+ * policy_destroy - free the elements referenced by @policy
+ * @policy: policy that is to have its elements freed  (NOT NULL)
+ */
+static void policy_destroy(struct aa_policy *policy)
+{
+	/* still contains profiles -- invalid */
+	if (on_list_rcu(&policy->profiles)) {
+		AA_ERROR("%s: internal error, "
+			 "policy '%s' still contains profiles\n",
+			 __func__, policy->name);
+		BUG();
+	}
+	if (on_list_rcu(&policy->list)) {
+		AA_ERROR("%s: internal error, policy '%s' still on list\n",
+			 __func__, policy->name);
+		BUG();
+	}
+
+	/* don't free name as it's a subset of hname */
+	kzfree(policy->hname);
+}
+
+/**
+ * __policy_find - find a policy by @name on a policy list
+ * @head: list to search  (NOT NULL)
+ * @name: name to search for  (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy that matches @name or NULL if not found
+ */
+static struct aa_policy *__policy_find(struct list_head *head, const char *name)
+{
+	struct aa_policy *policy;
+
+	list_for_each_entry_rcu(policy, head, list) {
+		if (!strcmp(policy->name, name))
+			return policy;
+	}
+	return NULL;
+}
+
+/**
+ * __policy_strn_find - find a policy whose name matches @len chars of @str
+ * @head: list to search  (NOT NULL)
+ * @str: string to search for  (NOT NULL)
+ * @len: length of match required
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy that matches @str or NULL if not found
+ *
+ * if @len == strlen(@str) then this is equiv to __policy_find
+ * otherwise it allows searching for policy by a partial match of name
+ */
+static struct aa_policy *__policy_strn_find(struct list_head *head,
+					    const char *str, int len)
+{
+	struct aa_policy *policy;
+
+	list_for_each_entry_rcu(policy, head, list) {
+		if (aa_strneq(policy->name, str, len))
+			return policy;
+	}
+
+	return NULL;
+}
+
+/*
+ * Routines for AppArmor namespaces
+ */
+
+static const char *hidden_ns_name = "---";
+/**
+ * aa_ns_visible - test if @view is visible from @curr
+ * @curr: namespace to treat as the parent (NOT NULL)
+ * @view:  namespace to test if visible from @curr (NOT NULL)
+ *
+ * Returns: true if @view is visible from @curr else false
+ */
+bool aa_ns_visible(struct aa_namespace *curr, struct aa_namespace *view)
+{
+	if (curr == view)
+		return true;
+
+	for ( ; view; view = view->parent) {
+		if (view->parent == curr)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * aa_ns_name - Find the ns name to display for @view from @curr
+ * @curr: current namespace (NOT NULL)
+ * @view: namespace attempting to view (NOT NULL)
+ *
+ * Returns: name of @view visible from @curr
+ */
+const char *aa_ns_name(struct aa_namespace *curr, struct aa_namespace *view)
+{
+	/* if view == curr then the namespace name isn't displayed */
+	if (curr == view)
+		return "";
+
+	if (aa_ns_visible(curr, view)) {
+		/* at this point if a ns is visible it is in a view ns
+		 * thus the curr ns.hname is a prefix of its name.
+		 * Only output the virtualized portion of the name
+		 * Add + 2 to skip over // separating curr hname prefix
+		 * from the visible tail of the views hname
+		 */
+		return view->base.hname + strlen(curr->base.hname) + 2;
+	} else
+		return hidden_ns_name;
+}
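+
+/*
+ * For example, with @curr named "root" and @view named "root//ns1",
+ * aa_ns_name() returns "ns1"; a namespace not visible from @curr is
+ * reported as the hidden name "---".
+ */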
+
+/**
+ * alloc_namespace - allocate, initialize and return a new namespace
+ * @prefix: parent namespace name (MAYBE NULL)
+ * @name: a preallocated name  (NOT NULL)
+ *
+ * Returns: refcounted namespace or NULL on failure.
+ */
+static struct aa_namespace *alloc_namespace(const char *prefix,
+					    const char *name)
+{
+	struct aa_namespace *ns;
+
+	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+	AA_DEBUG("%s(%p)\n", __func__, ns);
+	if (!ns)
+		return NULL;
+	if (!policy_init(&ns->base, prefix, name))
+		goto fail_ns;
+
+	INIT_LIST_HEAD(&ns->sub_ns);
+	mutex_init(&ns->lock);
+
+	/* released by free_namespace */
+	ns->unconfined = aa_alloc_profile("unconfined");
+	if (!ns->unconfined)
+		goto fail_unconfined;
+
+	ns->unconfined->flags = PFLAG_IX_ON_NAME_ERROR |
+		PFLAG_IMMUTABLE | PFLAG_NS_COUNT;
+	ns->unconfined->mode = APPARMOR_UNCONFINED;
+
+	/* ns and ns->unconfined share ns->unconfined refcount */
+	ns->unconfined->ns = ns;
+
+	atomic_set(&ns->uniq_null, 0);
+
+	return ns;
+
+fail_unconfined:
+	kzfree(ns->base.hname);
+fail_ns:
+	kzfree(ns);
+	return NULL;
+}
+
+/**
+ * free_namespace - free a profile namespace
+ * @ns: the namespace to free  (MAYBE NULL)
+ *
+ * Requires: All references to the namespace must have been put, if the
+ *           namespace was referenced by a profile confining a task.
+ */
+static void free_namespace(struct aa_namespace *ns)
+{
+	if (!ns)
+		return;
+
+	policy_destroy(&ns->base);
+	aa_put_namespace(ns->parent);
+
+	ns->unconfined->ns = NULL;
+	aa_free_profile(ns->unconfined);
+	kzfree(ns);
+}
+
+/**
+ * __aa_find_namespace - find a namespace on a list by @name
+ * @head: list to search for namespace on  (NOT NULL)
+ * @name: name of namespace to look for  (NOT NULL)
+ *
+ * Returns: unrefcounted namespace
+ *
+ * Requires: rcu_read_lock be held
+ */
+static struct aa_namespace *__aa_find_namespace(struct list_head *head,
+						const char *name)
+{
+	return (struct aa_namespace *)__policy_find(head, name);
+}
+
+/**
+ * aa_find_namespace  -  look up a profile namespace on the namespace list
+ * @root: namespace to search in  (NOT NULL)
+ * @name: name of namespace to find  (NOT NULL)
+ *
+ * Returns: a refcounted namespace on the list, or NULL if no namespace
+ *          called @name exists.
+ *
+ * refcount released by caller
+ */
+struct aa_namespace *aa_find_namespace(struct aa_namespace *root,
+				       const char *name)
+{
+	struct aa_namespace *ns = NULL;
+
+	rcu_read_lock();
+	ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name));
+	rcu_read_unlock();
+
+	return ns;
+}
+
+/**
+ * aa_prepare_namespace - find an existing or create a new namespace of @name
+ * @name: the namespace to find or add  (MAYBE NULL)
+ *
+ * Returns: refcounted namespace or NULL if failed to create one
+ */
+static struct aa_namespace *aa_prepare_namespace(const char *name)
+{
+	struct aa_namespace *ns, *root;
+
+	root = aa_current_profile()->ns;
+
+	mutex_lock(&root->lock);
+
+	/* if name isn't specified the profile is loaded to the current ns */
+	if (!name) {
+		/* released by caller */
+		ns = aa_get_namespace(root);
+		goto out;
+	}
+
+	/* try and find the specified ns and if it doesn't exist create it */
+	/* released by caller */
+	ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name));
+	if (!ns) {
+		ns = alloc_namespace(root->base.hname, name);
+		if (!ns)
+			goto out;
+		if (__aa_fs_namespace_mkdir(ns, ns_subns_dir(root), name)) {
+			AA_ERROR("Failed to create interface for ns %s\n",
+				 ns->base.name);
+			free_namespace(ns);
+			ns = NULL;
+			goto out;
+		}
+		ns->parent = aa_get_namespace(root);
+		list_add_rcu(&ns->base.list, &root->sub_ns);
+		/* add list ref */
+		aa_get_namespace(ns);
+	}
+out:
+	mutex_unlock(&root->lock);
+
+	/* return ref */
+	return ns;
+}
+
+/**
+ * __list_add_profile - add a profile to a list
+ * @list: list to add it to  (NOT NULL)
+ * @profile: the profile to add  (NOT NULL)
+ *
+ * refcount @profile, should be put by __list_remove_profile
+ *
+ * Requires: namespace lock be held, or list not be shared
+ */
+static void __list_add_profile(struct list_head *list,
+			       struct aa_profile *profile)
+{
+	list_add_rcu(&profile->base.list, list);
+	/* get list reference */
+	aa_get_profile(profile);
+}
+
+/**
+ * __list_remove_profile - remove a profile from the list it is on
+ * @profile: the profile to remove  (NOT NULL)
+ *
+ * Remove a profile from the list.  Warning: generally removal should
+ * be done with __replace_profile as most profile removals are
+ * replacements to the unconfined profile.
+ *
+ * put @profile list refcount
+ *
+ * Requires: namespace lock be held, or list not have been live
+ */
+static void __list_remove_profile(struct aa_profile *profile)
+{
+	list_del_rcu(&profile->base.list);
+	aa_put_profile(profile);
+}
+
+static void __profile_list_release(struct list_head *head);
+
+/**
+ * __remove_profile - remove old profile, and children
+ * @profile: profile to be replaced  (NOT NULL)
+ *
+ * Requires: namespace list lock be held, or list not be shared
+ */
+static void __remove_profile(struct aa_profile *profile)
+{
+	/* release any children lists first */
+	__profile_list_release(&profile->base.profiles);
+	/* released by free_profile */
+	__aa_update_replacedby(profile, profile->ns->unconfined);
+	__aa_fs_profile_rmdir(profile);
+	__list_remove_profile(profile);
+}
+
+/**
+ * __profile_list_release - remove all profiles on the list and put refs
+ * @head: list of profiles  (NOT NULL)
+ *
+ * Requires: namespace lock be held
+ */
+static void __profile_list_release(struct list_head *head)
+{
+	struct aa_profile *profile, *tmp;
+	list_for_each_entry_safe(profile, tmp, head, base.list)
+		__remove_profile(profile);
+}
+
+static void __ns_list_release(struct list_head *head);
+
+/**
+ * destroy_namespace - remove everything contained by @ns
+ * @ns: namespace to have its contents removed  (NOT NULL)
+ */
+static void destroy_namespace(struct aa_namespace *ns)
+{
+	if (!ns)
+		return;
+
+	mutex_lock(&ns->lock);
+	/* release all profiles in this namespace */
+	__profile_list_release(&ns->base.profiles);
+
+	/* release all sub namespaces */
+	__ns_list_release(&ns->sub_ns);
+
+	if (ns->parent)
+		__aa_update_replacedby(ns->unconfined, ns->parent->unconfined);
+	__aa_fs_namespace_rmdir(ns);
+	mutex_unlock(&ns->lock);
+}
+
+/**
+ * __remove_namespace - remove a namespace and all its children
+ * @ns: namespace to be removed  (NOT NULL)
+ *
+ * Requires: ns->parent->lock be held and ns removed from parent.
+ */
+static void __remove_namespace(struct aa_namespace *ns)
+{
+	/* remove ns from namespace list */
+	list_del_rcu(&ns->base.list);
+	destroy_namespace(ns);
+	aa_put_namespace(ns);
+}
+
+/**
+ * __ns_list_release - remove all profile namespaces on the list and put refs
+ * @head: list of profile namespaces  (NOT NULL)
+ *
+ * Requires: namespace lock be held
+ */
+static void __ns_list_release(struct list_head *head)
+{
+	struct aa_namespace *ns, *tmp;
+	list_for_each_entry_safe(ns, tmp, head, base.list)
+		__remove_namespace(ns);
+}
+
+/**
+ * aa_alloc_root_ns - allocate the root profile namespace
+ *
+ * Returns: %0 on success else error
+ *
+ */
+int __init aa_alloc_root_ns(void)
+{
+	/* released by aa_free_root_ns - used as list ref */
+	root_ns = alloc_namespace(NULL, "root");
+	if (!root_ns)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * aa_free_root_ns - free the root profile namespace
+ */
+void __init aa_free_root_ns(void)
+{
+	struct aa_namespace *ns = root_ns;
+	root_ns = NULL;
+
+	destroy_namespace(ns);
+	aa_put_namespace(ns);
+}
+
+
+static void free_replacedby(struct aa_replacedby *r)
+{
+	if (r) {
+		/* r->profile will not be updated any more as r is dead */
+		aa_put_profile(rcu_dereference_protected(r->profile, true));
+		kzfree(r);
+	}
+}
+
+
+void aa_free_replacedby_kref(struct kref *kref)
+{
+	struct aa_replacedby *r = container_of(kref, struct aa_replacedby,
+					       count);
+	free_replacedby(r);
+}
+
+/**
+ * aa_free_profile - free a profile
+ * @profile: the profile to free  (MAYBE NULL)
+ *
+ * Free a profile, its hats and null_profile. All references to the profile,
+ * its hats and null_profile must have been put.
+ *
+ * If the profile was referenced from a task context, aa_free_profile() will
+ * be called from an rcu callback routine, so we must not sleep here.
+ */
+void aa_free_profile(struct aa_profile *profile)
+{
+	AA_DEBUG("%s(%p)\n", __func__, profile);
+
+	if (!profile)
+		return;
+
+	/* free children profiles */
+	policy_destroy(&profile->base);
+	aa_put_profile(rcu_access_pointer(profile->parent));
+
+	aa_put_namespace(profile->ns);
+	kzfree(profile->rename);
+
+	aa_free_file_rules(&profile->file);
+	aa_free_cap_rules(&profile->caps);
+	aa_free_rlimit_rules(&profile->rlimits);
+
+	kzfree(profile->dirname);
+	aa_put_dfa(profile->xmatch);
+	aa_put_dfa(profile->policy.dfa);
+	aa_put_replacedby(profile->replacedby);
+
+	kzfree(profile->hash);
+	kzfree(profile);
+}
+
+/**
+ * aa_free_profile_rcu - free aa_profile by rcu (called by aa_free_profile_kref)
+ * @head: rcu_head callback for freeing of a profile  (NOT NULL)
+ */
+static void aa_free_profile_rcu(struct rcu_head *head)
+{
+	struct aa_profile *p = container_of(head, struct aa_profile, rcu);
+	if (p->flags & PFLAG_NS_COUNT)
+		free_namespace(p->ns);
+	else
+		aa_free_profile(p);
+}
+
+/**
+ * aa_free_profile_kref - free aa_profile by kref (called by aa_put_profile)
+ * @kref: kref callback for freeing of a profile  (NOT NULL)
+ */
+void aa_free_profile_kref(struct kref *kref)
+{
+	struct aa_profile *p = container_of(kref, struct aa_profile, count);
+	call_rcu(&p->rcu, aa_free_profile_rcu);
+}
+
+/**
+ * aa_alloc_profile - allocate, initialize and return a new profile
+ * @hname: name of the profile  (NOT NULL)
+ *
+ * Returns: refcount profile or NULL on failure
+ */
+struct aa_profile *aa_alloc_profile(const char *hname)
+{
+	struct aa_profile *profile;
+
+	/* freed by free_profile - usually through aa_put_profile */
+	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+	if (!profile)
+		return NULL;
+
+	profile->replacedby = kzalloc(sizeof(struct aa_replacedby), GFP_KERNEL);
+	if (!profile->replacedby)
+		goto fail;
+	kref_init(&profile->replacedby->count);
+
+	if (!policy_init(&profile->base, NULL, hname))
+		goto fail;
+	kref_init(&profile->count);
+
+	/* refcount released by caller */
+	return profile;
+
+fail:
+	kzfree(profile->replacedby);
+	kzfree(profile);
+
+	return NULL;
+}
+
+/**
+ * aa_new_null_profile - create a new null-X learning profile
+ * @parent: profile that caused this profile to be created (NOT NULL)
+ * @hat: true if the null- learning profile is a hat
+ *
+ * Create a null- complain mode profile used in learning mode.  The name of
+ * the profile is unique and follows the format of parent//null-<uniq>.
+ *
+ * null profiles are added to the profile list but the list does not
+ * hold a count on them so that they are automatically released when
+ * not in use.
+ *
+ * Returns: new refcounted profile else NULL on failure
+ */
+struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat)
+{
+	struct aa_profile *profile = NULL;
+	char *name;
+	int uniq = atomic_inc_return(&parent->ns->uniq_null);
+
+	/* freed below */
+	name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, GFP_KERNEL);
+	if (!name)
+		goto fail;
+	sprintf(name, "%s//null-%x", parent->base.hname, uniq);
+
+	profile = aa_alloc_profile(name);
+	kfree(name);
+	if (!profile)
+		goto fail;
+
+	profile->mode = APPARMOR_COMPLAIN;
+	profile->flags = PFLAG_NULL;
+	if (hat)
+		profile->flags |= PFLAG_HAT;
+
+	/* released on free_profile */
+	rcu_assign_pointer(profile->parent, aa_get_profile(parent));
+	profile->ns = aa_get_namespace(parent->ns);
+
+	mutex_lock(&profile->ns->lock);
+	__list_add_profile(&parent->base.profiles, profile);
+	mutex_unlock(&profile->ns->lock);
+
+	/* refcount released by caller */
+	return profile;
+
+fail:
+	return NULL;
+}
+
+/* TODO: profile accounting - setup in remove */
+
+/**
+ * __find_child - find a profile on @head list with a name matching @name
+ * @head: list to search  (NOT NULL)
+ * @name: name of profile (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile ptr, or NULL if not found
+ */
+static struct aa_profile *__find_child(struct list_head *head, const char *name)
+{
+	return (struct aa_profile *)__policy_find(head, name);
+}
+
+/**
+ * __strn_find_child - find a profile on @head list using substring of @name
+ * @head: list to search  (NOT NULL)
+ * @name: name of profile (NOT NULL)
+ * @len: length of @name substring to match
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile ptr, or NULL if not found
+ */
+static struct aa_profile *__strn_find_child(struct list_head *head,
+					    const char *name, int len)
+{
+	return (struct aa_profile *)__policy_strn_find(head, name, len);
+}
+
+/**
+ * aa_find_child - find a profile by @name in @parent
+ * @parent: profile to search  (NOT NULL)
+ * @name: profile name to search for  (NOT NULL)
+ *
+ * Returns: a refcounted profile or NULL if not found
+ */
+struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
+{
+	struct aa_profile *profile;
+
+	rcu_read_lock();
+	profile = aa_get_profile(__find_child(&parent->base.profiles, name));
+	rcu_read_unlock();
+
+	/* refcount released by caller */
+	return profile;
+}
+
+/**
+ * __lookup_parent - lookup the parent of a profile of name @hname
+ * @ns: namespace to lookup profile in  (NOT NULL)
+ * @hname: hierarchical profile name to find parent of  (NOT NULL)
+ *
+ * Looks up the parent of a fully qualified profile name, the profile
+ * that matches hname does not need to exist, in general this
+ * is used to load a new profile.
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy or NULL if not found
+ */
+static struct aa_policy *__lookup_parent(struct aa_namespace *ns,
+					 const char *hname)
+{
+	struct aa_policy *policy;
+	struct aa_profile *profile = NULL;
+	char *split;
+
+	policy = &ns->base;
+
+	for (split = strstr(hname, "//"); split;) {
+		profile = __strn_find_child(&policy->profiles, hname,
+					    split - hname);
+		if (!profile)
+			return NULL;
+		policy = &profile->base;
+		hname = split + 2;
+		split = strstr(hname, "//");
+	}
+	if (!profile)
+		return &ns->base;
+	return &profile->base;
+}
+
+/**
+ * __lookup_profile - lookup the profile matching @hname
+ * @base: base list to start looking up profile name from  (NOT NULL)
+ * @hname: hierarchical profile name  (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile pointer or NULL if not found
+ *
+ * Do a relative name lookup, recursing through the profile tree.
+ */
+static struct aa_profile *__lookup_profile(struct aa_policy *base,
+					   const char *hname)
+{
+	struct aa_profile *profile = NULL;
+	char *split;
+
+	for (split = strstr(hname, "//"); split;) {
+		profile = __strn_find_child(&base->profiles, hname,
+					    split - hname);
+		if (!profile)
+			return NULL;
+
+		base = &profile->base;
+		hname = split + 2;
+		split = strstr(hname, "//");
+	}
+
+	profile = __find_child(&base->profiles, hname);
+
+	return profile;
+}
+
+/**
+ * aa_lookup_profile - find a profile by its full or partial name
+ * @ns: the namespace to start from (NOT NULL)
+ * @hname: name to do lookup on.  Does not contain namespace prefix (NOT NULL)
+ *
+ * Returns: refcounted profile or NULL if not found
+ */
+struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *hname)
+{
+	struct aa_profile *profile;
+
+	rcu_read_lock();
+	do {
+		profile = __lookup_profile(&ns->base, hname);
+	} while (profile && !aa_get_profile_not0(profile));
+	rcu_read_unlock();
+
+	/* the unconfined profile is not in the regular profile list */
+	if (!profile && strcmp(hname, "unconfined") == 0)
+		profile = aa_get_newest_profile(ns->unconfined);
+
+	/* refcount released by caller */
+	return profile;
+}
+
+/**
+ * replacement_allowed - test to see if replacement is allowed
+ * @profile: profile to test if it can be replaced  (MAYBE NULL)
+ * @noreplace: true if replacement shouldn't be allowed but addition is okay
+ * @info: Returns - info about why replacement failed (NOT NULL)
+ *
+ * Returns: %0 if replacement allowed else error code
+ */
+static int replacement_allowed(struct aa_profile *profile, int noreplace,
+			       const char **info)
+{
+	if (profile) {
+		if (profile->flags & PFLAG_IMMUTABLE) {
+			*info = "cannot replace immutible profile";
+			return -EPERM;
+		} else if (noreplace) {
+			*info = "profile already exists";
+			return -EEXIST;
+		}
+	}
+	return 0;
+}
+
+/**
+ * audit_policy - do auditing of policy changes
+ * @op: policy operation being performed
+ * @gfp: memory allocation flags
+ * @name: name of profile being manipulated (NOT NULL)
+ * @info: any extra information to be audited (MAYBE NULL)
+ * @error: error code
+ *
+ * Returns: the error to be returned after audit is done
+ */
+static int audit_policy(int op, gfp_t gfp, const char *name, const char *info,
+			int error)
+{
+	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
+	sa.type = LSM_AUDIT_DATA_NONE;
+	sa.aad = &aad;
+	aad.op = op;
+	aad.name = name;
+	aad.info = info;
+	aad.error = error;
+
+	return aa_audit(AUDIT_APPARMOR_STATUS, __aa_current_profile(), gfp,
+			&sa, NULL);
+}
+
+/**
+ * aa_may_manage_policy - can the current task manage policy
+ * @op: the policy manipulation operation being done
+ *
+ * Returns: true if the task is allowed to manipulate policy
+ */
+bool aa_may_manage_policy(int op)
+{
+	/* check if loading policy is locked out */
+	if (aa_g_lock_policy) {
+		audit_policy(op, GFP_KERNEL, NULL, "policy_locked", -EACCES);
+		return 0;
+	}
+
+	if (!capable(CAP_MAC_ADMIN)) {
+		audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES);
+		return 0;
+	}
+
+	return 1;
+}
+
+static struct aa_profile *__list_lookup_parent(struct list_head *lh,
+					       struct aa_profile *profile)
+{
+	const char *base = hname_tail(profile->base.hname);
+	long len = base - profile->base.hname;
+	struct aa_load_ent *ent;
+
+	/* parent won't have trailing // so remove from len */
+	if (len <= 2)
+		return NULL;
+	len -= 2;
+
+	list_for_each_entry(ent, lh, list) {
+		if (ent->new == profile)
+			continue;
+		if (strncmp(ent->new->base.hname, profile->base.hname, len) ==
+		    0 && ent->new->base.hname[len] == 0)
+			return ent->new;
+	}
+
+	return NULL;
+}
+
+/**
+ * __replace_profile - replace @old with @new on a list
+ * @old: profile to be replaced  (NOT NULL)
+ * @new: profile to replace @old with  (NOT NULL)
+ * @share_replacedby: transfer @old->replacedby to @new
+ *
+ * Will duplicate and refcount elements that @new inherits from @old
+ * and will inherit @old children.
+ *
+ * refcount @new for list, put @old list refcount
+ *
+ * Requires: namespace list lock be held, or list not be shared
+ */
+static void __replace_profile(struct aa_profile *old, struct aa_profile *new,
+			      bool share_replacedby)
+{
+	struct aa_profile *child, *tmp;
+
+	if (!list_empty(&old->base.profiles)) {
+		LIST_HEAD(lh);
+		list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu);
+
+		list_for_each_entry_safe(child, tmp, &lh, base.list) {
+			struct aa_profile *p;
+
+			list_del_init(&child->base.list);
+			p = __find_child(&new->base.profiles, child->base.name);
+			if (p) {
+				/* @p replaces @child  */
+				__replace_profile(child, p, share_replacedby);
+				continue;
+			}
+
+			/* inherit @child and its children */
+			/* TODO: update hname of inherited children */
+			/* list refcount transferred to @new */
+			p = aa_deref_parent(child);
+			rcu_assign_pointer(child->parent, aa_get_profile(new));
+			list_add_rcu(&child->base.list, &new->base.profiles);
+			aa_put_profile(p);
+		}
+	}
+
+	if (!rcu_access_pointer(new->parent)) {
+		struct aa_profile *parent = aa_deref_parent(old);
+		rcu_assign_pointer(new->parent, aa_get_profile(parent));
+	}
+	__aa_update_replacedby(old, new);
+	if (share_replacedby) {
+		aa_put_replacedby(new->replacedby);
+		new->replacedby = aa_get_replacedby(old->replacedby);
+	} else if (!rcu_access_pointer(new->replacedby->profile))
+		/* aafs interface uses replacedby */
+		rcu_assign_pointer(new->replacedby->profile,
+				   aa_get_profile(new));
+	__aa_fs_profile_migrate_dents(old, new);
+
+	if (list_empty(&new->base.list)) {
+		/* new is not on a list already */
+		list_replace_rcu(&old->base.list, &new->base.list);
+		aa_get_profile(new);
+		aa_put_profile(old);
+	} else
+		__list_remove_profile(old);
+}
+
+/**
+ * __lookup_replace - lookup replacement information for a profile
+ * @ns: namespace the lookup occurs in
+ * @hname: name of profile to lookup
+ * @noreplace: true if not replacing an existing profile
+ * @p: Returns - profile to be replaced
+ * @info: Returns - info string on why lookup failed
+ *
+ * Returns: %0 on success else error code if the profile can not be replaced
+ */
+static int __lookup_replace(struct aa_namespace *ns, const char *hname,
+			    bool noreplace, struct aa_profile **p,
+			    const char **info)
+{
+	*p = aa_get_profile(__lookup_profile(&ns->base, hname));
+	if (*p) {
+		int error = replacement_allowed(*p, noreplace, info);
+		if (error) {
+			*info = "profile can not be replaced";
+			return error;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * aa_replace_profiles - replace profile(s) on the profile list
+ * @udata: serialized data stream  (NOT NULL)
+ * @size: size of the serialized data stream
+ * @noreplace: true if only doing addition, no replacement allowed
+ *
+ * Unpack and replace a profile on the profile list, and update any uses of
+ * that profile by aa_task_cxt.  If the profile does not exist on the
+ * profile list it is added.
+ *
+ * Returns: size of data consumed else error code on failure.
+ */
+ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
+{
+	const char *ns_name, *name = NULL, *info = NULL;
+	struct aa_namespace *ns = NULL;
+	struct aa_load_ent *ent, *tmp;
+	int op = OP_PROF_REPL;
+	ssize_t error;
+	LIST_HEAD(lh);
+
+	/* released below */
+	error = aa_unpack(udata, size, &lh, &ns_name);
+	if (error)
+		goto out;
+
+	/* released below */
+	ns = aa_prepare_namespace(ns_name);
+	if (!ns) {
+		info = "failed to prepare namespace";
+		error = -ENOMEM;
+		name = ns_name;
+		goto fail;
+	}
+
+	mutex_lock(&ns->lock);
+	/* setup parent and ns info */
+	list_for_each_entry(ent, &lh, list) {
+		struct aa_policy *policy;
+
+		name = ent->new->base.hname;
+		error = __lookup_replace(ns, ent->new->base.hname, noreplace,
+					 &ent->old, &info);
+		if (error)
+			goto fail_lock;
+
+		if (ent->new->rename) {
+			error = __lookup_replace(ns, ent->new->rename,
+						 noreplace, &ent->rename,
+						 &info);
+			if (error)
+				goto fail_lock;
+		}
+
+		/* released when @new is freed */
+		ent->new->ns = aa_get_namespace(ns);
+
+		if (ent->old || ent->rename)
+			continue;
+
+		/* no ref on policy only use inside lock */
+		policy = __lookup_parent(ns, ent->new->base.hname);
+		if (!policy) {
+			struct aa_profile *p;
+			p = __list_lookup_parent(&lh, ent->new);
+			if (!p) {
+				error = -ENOENT;
+				info = "parent does not exist";
+				name = ent->new->base.hname;
+				goto fail_lock;
+			}
+			rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+		} else if (policy != &ns->base) {
+			/* released on profile replacement or free_profile */
+			struct aa_profile *p = (struct aa_profile *) policy;
+			rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+		}
+	}
+
+	/* create new fs entries for introspection if needed */
+	list_for_each_entry(ent, &lh, list) {
+		if (ent->old) {
+			/* inherit old interface files */
+
+			/* if (ent->rename)
+				TODO: support rename */
+		/* } else if (ent->rename) {
+			TODO: support rename */
+		} else {
+			struct dentry *parent;
+			if (rcu_access_pointer(ent->new->parent)) {
+				struct aa_profile *p;
+				p = aa_deref_parent(ent->new);
+				parent = prof_child_dir(p);
+			} else
+				parent = ns_subprofs_dir(ent->new->ns);
+			error = __aa_fs_profile_mkdir(ent->new, parent);
+		}
+
+		if (error) {
+			info = "failed to create ";
+			goto fail_lock;
+		}
+	}
+
+	/* Done with checks that may fail - do actual replacement */
+	list_for_each_entry_safe(ent, tmp, &lh, list) {
+		list_del_init(&ent->list);
+		op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
+
+		audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error);
+
+		if (ent->old) {
+			__replace_profile(ent->old, ent->new, 1);
+			if (ent->rename) {
+				/* aafs interface uses replacedby */
+				struct aa_replacedby *r = ent->new->replacedby;
+				rcu_assign_pointer(r->profile,
+						   aa_get_profile(ent->new));
+				__replace_profile(ent->rename, ent->new, 0);
+			}
+		} else if (ent->rename) {
+			/* aafs interface uses replacedby */
+			rcu_assign_pointer(ent->new->replacedby->profile,
+					   aa_get_profile(ent->new));
+			__replace_profile(ent->rename, ent->new, 0);
+		} else if (ent->new->parent) {
+			struct aa_profile *parent, *newest;
+			parent = aa_deref_parent(ent->new);
+			newest = aa_get_newest_profile(parent);
+
+			/* parent replaced in this atomic set? */
+			if (newest != parent) {
+				aa_get_profile(newest);
+				aa_put_profile(parent);
+				rcu_assign_pointer(ent->new->parent, newest);
+			} else
+				aa_put_profile(newest);
+			/* aafs interface uses replacedby */
+			rcu_assign_pointer(ent->new->replacedby->profile,
+					   aa_get_profile(ent->new));
+			__list_add_profile(&parent->base.profiles, ent->new);
+		} else {
+			/* aafs interface uses replacedby */
+			rcu_assign_pointer(ent->new->replacedby->profile,
+					   aa_get_profile(ent->new));
+			__list_add_profile(&ns->base.profiles, ent->new);
+		}
+		aa_load_ent_free(ent);
+	}
+	mutex_unlock(&ns->lock);
+
+out:
+	aa_put_namespace(ns);
+
+	if (error)
+		return error;
+	return size;
+
+fail_lock:
+	mutex_unlock(&ns->lock);
+fail:
+	error = audit_policy(op, GFP_KERNEL, name, info, error);
+
+	list_for_each_entry_safe(ent, tmp, &lh, list) {
+		list_del_init(&ent->list);
+		aa_load_ent_free(ent);
+	}
+
+	goto out;
+}
+
+/**
+ * aa_remove_profiles - remove profile(s) from the system
+ * @fqname: name of the profile or namespace to remove  (NOT NULL)
+ * @size: size of the name
+ *
+ * Remove a profile or sub namespace from the current namespace, so that
+ * it can not be found anymore, and mark it as replaced by unconfined
+ *
+ * NOTE: removing confinement does not restore rlimits to pre-confinement values
+ *
+ * Returns: size of data consumed else error code on failure
+ */
+ssize_t aa_remove_profiles(char *fqname, size_t size)
+{
+	struct aa_namespace *root, *ns = NULL;
+	struct aa_profile *profile = NULL;
+	const char *name = fqname, *info = NULL;
+	ssize_t error = 0;
+
+	if (*fqname == 0) {
+		info = "no profile specified";
+		error = -ENOENT;
+		goto fail;
+	}
+
+	root = aa_current_profile()->ns;
+
+	if (fqname[0] == ':') {
+		char *ns_name;
+		name = aa_split_fqname(fqname, &ns_name);
+		/* released below */
+		ns = aa_find_namespace(root, ns_name);
+		if (!ns) {
+			info = "namespace does not exist";
+			error = -ENOENT;
+			goto fail;
+		}
+	} else
+		/* released below */
+		ns = aa_get_namespace(root);
+
+	if (!name) {
+		/* remove namespace - can only happen if fqname[0] == ':' */
+		mutex_lock(&ns->parent->lock);
+		__remove_namespace(ns);
+		mutex_unlock(&ns->parent->lock);
+	} else {
+		/* remove profile */
+		mutex_lock(&ns->lock);
+		profile = aa_get_profile(__lookup_profile(&ns->base, name));
+		if (!profile) {
+			error = -ENOENT;
+			info = "profile does not exist";
+			goto fail_ns_lock;
+		}
+		name = profile->base.hname;
+		__remove_profile(profile);
+		mutex_unlock(&ns->lock);
+	}
+
+	/* don't fail removal if audit fails */
+	(void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
+	aa_put_namespace(ns);
+	aa_put_profile(profile);
+	return size;
+
+fail_ns_lock:
+	mutex_unlock(&ns->lock);
+	aa_put_namespace(ns);
+
+fail:
+	(void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
+	return error;
+}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
new file mode 100644
index 0000000..a689f10
--- /dev/null
+++ b/security/apparmor/policy_unpack.c
@@ -0,0 +1,805 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor functions for unpacking policy loaded from
+ * userspace.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * AppArmor uses a serialized binary format for loading policy. To find
+ * policy format documentation look in Documentation/security/apparmor.txt
+ * All policy is validated before it is used.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/context.h"
+#include "include/crypto.h"
+#include "include/match.h"
+#include "include/policy.h"
+#include "include/policy_unpack.h"
+
+/*
+ * The AppArmor interface treats data as a type byte followed by the
+ * actual data.  The interface has the notion of a named entry
+ * which has a name (AA_NAME typecode followed by name string) followed by
+ * the entry's typecode and data.  Named types allow for optional
+ * elements and extensions to be added and tested for without breaking
+ * backwards compatibility.
+ */
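+
+/*
+ * Illustrative sketch of the encoding (layout inferred from the unpack
+ * helpers below; the element name and value are assumed examples): a
+ * named u32 element such as "version" would be serialized as
+ *
+ *   AA_NAME, <le16: 8>, "version\0", AA_U32, <le32: 5>
+ *
+ * i.e. the AA_NAME typecode, a u16 little-endian size-prefixed chunk
+ * holding the tag name (the size counts the trailing NUL), then the
+ * element's own typecode and payload.
+ */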
+
+enum aa_code {
+	AA_U8,
+	AA_U16,
+	AA_U32,
+	AA_U64,
+	AA_NAME,		/* same as string except it is items name */
+	AA_STRING,
+	AA_BLOB,
+	AA_STRUCT,
+	AA_STRUCTEND,
+	AA_LIST,
+	AA_LISTEND,
+	AA_ARRAY,
+	AA_ARRAYEND,
+};
+
+/*
+ * aa_ext is the read head of the buffer containing the serialized profile.  The
+ * data is copied into a kernel buffer in apparmorfs and then handed off to
+ * the unpack routines.
+ */
+struct aa_ext {
+	void *start;
+	void *end;
+	void *pos;		/* pointer to current position in the buffer */
+	u32 version;
+};
+
+/* audit callback for unpack fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+	struct common_audit_data *sa = va;
+	if (sa->aad->iface.target) {
+		struct aa_profile *name = sa->aad->iface.target;
+		audit_log_format(ab, " name=");
+		audit_log_untrustedstring(ab, name->base.hname);
+	}
+	if (sa->aad->iface.pos)
+		audit_log_format(ab, " offset=%ld", sa->aad->iface.pos);
+}
+
+/**
+ * audit_iface - do audit message for policy unpacking/load/replace/remove
+ * @new: profile if it has been allocated (MAYBE NULL)
+ * @name: name of the profile being manipulated (MAYBE NULL)
+ * @info: any extra info about the failure (MAYBE NULL)
+ * @e: buffer position info
+ * @error: error code
+ *
+ * Returns: %0 or error
+ */
+static int audit_iface(struct aa_profile *new, const char *name,
+		       const char *info, struct aa_ext *e, int error)
+{
+	struct aa_profile *profile = __aa_current_profile();
+	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
+	sa.type = LSM_AUDIT_DATA_NONE;
+	sa.aad = &aad;
+	if (e)
+		aad.iface.pos = e->pos - e->start;
+	aad.iface.target = new;
+	aad.name = name;
+	aad.info = info;
+	aad.error = error;
+
+	return aa_audit(AUDIT_APPARMOR_STATUS, profile, GFP_KERNEL, &sa,
+			audit_cb);
+}
+
+/* test if read will be in packed data bounds */
+static bool inbounds(struct aa_ext *e, size_t size)
+{
+	return (size <= e->end - e->pos);
+}
+
+/**
+ * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
+ * @e: serialized data read head (NOT NULL)
+ * @chunk: start address for chunk of data (NOT NULL)
+ *
+ * Returns: the size of chunk found with the read head at the end of the chunk.
+ */
+static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
+{
+	size_t size = 0;
+
+	if (!inbounds(e, sizeof(u16)))
+		return 0;
+	size = le16_to_cpu(get_unaligned((u16 *) e->pos));
+	e->pos += sizeof(u16);
+	if (!inbounds(e, size))
+		return 0;
+	*chunk = e->pos;
+	e->pos += size;
+	return size;
+}
+
+/* unpack control byte */
+static bool unpack_X(struct aa_ext *e, enum aa_code code)
+{
+	if (!inbounds(e, 1))
+		return 0;
+	if (*(u8 *) e->pos != code)
+		return 0;
+	e->pos++;
+	return 1;
+}
+
+/**
+ * unpack_nameX - check if the next element is of type X with a name of @name
+ * @e: serialized data extent information  (NOT NULL)
+ * @code: type code
+ * @name: name to match to the serialized element.  (MAYBE NULL)
+ *
+ * check that the next serialized data element is of type X and has a tag
+ * name @name.  If @name is specified then there must be a matching
+ * name element in the stream.  If @name is NULL any name element will be
+ * skipped and only the typecode will be tested.
+ *
+ * Returns: 1 on success (both type code and name tests match) and the read
+ * head is advanced past the header; 0 if either match fails, in which case
+ * the read head does not move
+ */
+static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+{
+	/*
+	 * May need to reset pos if name or type doesn't match
+	 */
+	void *pos = e->pos;
+	/*
+	 * Check for the presence of a tag name; if present, the name size
+	 * (the AA_NAME tag value) is a u16.
+	 */
+	if (unpack_X(e, AA_NAME)) {
+		char *tag = NULL;
+		size_t size = unpack_u16_chunk(e, &tag);
+		/* if a name is specified it must match. otherwise skip tag */
+		if (name && (!size || strcmp(name, tag)))
+			goto fail;
+	} else if (name) {
+		/* if a name is specified and there is no name tag fail */
+		goto fail;
+	}
+
+	/* now check if type code matches */
+	if (unpack_X(e, code))
+		return 1;
+
+fail:
+	e->pos = pos;
+	return 0;
+}
+
+static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
+{
+	if (unpack_nameX(e, AA_U32, name)) {
+		if (!inbounds(e, sizeof(u32)))
+			return 0;
+		if (data)
+			*data = le32_to_cpu(get_unaligned((u32 *) e->pos));
+		e->pos += sizeof(u32);
+		return 1;
+	}
+	return 0;
+}
+
+static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
+{
+	if (unpack_nameX(e, AA_U64, name)) {
+		if (!inbounds(e, sizeof(u64)))
+			return 0;
+		if (data)
+			*data = le64_to_cpu(get_unaligned((u64 *) e->pos));
+		e->pos += sizeof(u64);
+		return 1;
+	}
+	return 0;
+}
+
+static size_t unpack_array(struct aa_ext *e, const char *name)
+{
+	if (unpack_nameX(e, AA_ARRAY, name)) {
+		int size;
+		if (!inbounds(e, sizeof(u16)))
+			return 0;
+		size = (int)le16_to_cpu(get_unaligned((u16 *) e->pos));
+		e->pos += sizeof(u16);
+		return size;
+	}
+	return 0;
+}
+
+static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
+{
+	if (unpack_nameX(e, AA_BLOB, name)) {
+		u32 size;
+		if (!inbounds(e, sizeof(u32)))
+			return 0;
+		size = le32_to_cpu(get_unaligned((u32 *) e->pos));
+		e->pos += sizeof(u32);
+		if (inbounds(e, (size_t) size)) {
+			*blob = e->pos;
+			e->pos += size;
+			return size;
+		}
+	}
+	return 0;
+}
+
+static int unpack_str(struct aa_ext *e, const char **string, const char *name)
+{
+	char *src_str;
+	size_t size = 0;
+	void *pos = e->pos;
+	*string = NULL;
+	if (unpack_nameX(e, AA_STRING, name)) {
+		size = unpack_u16_chunk(e, &src_str);
+		if (size) {
+			/* strings are null terminated, length is size - 1 */
+			if (src_str[size - 1] != 0)
+				goto fail;
+			*string = src_str;
+		}
+	}
+	return size;
+
+fail:
+	e->pos = pos;
+	return 0;
+}
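+
+/*
+ * Example (assumed value): an AA_STRING chunk of size 6 carries
+ * "hello\0"; the size returned by unpack_str counts the trailing NUL,
+ * so the C string length is size - 1.
+ */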
+
+static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
+{
+	const char *tmp;
+	void *pos = e->pos;
+	int res = unpack_str(e, &tmp, name);
+	*string = NULL;
+
+	if (!res)
+		return 0;
+
+	*string = kmemdup(tmp, res, GFP_KERNEL);
+	if (!*string) {
+		e->pos = pos;
+		return 0;
+	}
+
+	return res;
+}
+
+#define DFA_VALID_PERM_MASK		0xffffffff
+#define DFA_VALID_PERM2_MASK		0xffffffff
+
+/**
+ * verify_accept - verify the accept tables of a dfa
+ * @dfa: dfa to verify accept tables of (NOT NULL)
+ * @flags: flags governing dfa
+ *
+ * Returns: 1 if valid accept tables else 0 if error
+ */
+static bool verify_accept(struct aa_dfa *dfa, int flags)
+{
+	int i;
+
+	/* verify accept permissions */
+	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
+		int mode = ACCEPT_TABLE(dfa)[i];
+
+		if (mode & ~DFA_VALID_PERM_MASK)
+			return 0;
+
+		if (ACCEPT_TABLE2(dfa)[i] & ~DFA_VALID_PERM2_MASK)
+			return 0;
+	}
+	return 1;
+}
+
+/**
+ * unpack_dfa - unpack a file rule dfa
+ * @e: serialized data extent information (NOT NULL)
+ *
+ * Returns: dfa, ERR_PTR on failure, or NULL if no dfa
+ */
+static struct aa_dfa *unpack_dfa(struct aa_ext *e)
+{
+	char *blob = NULL;
+	size_t size;
+	struct aa_dfa *dfa = NULL;
+
+	size = unpack_blob(e, &blob, "aadfa");
+	if (size) {
+		/*
+		 * The dfa is aligned within the blob to 8 bytes
+		 * from the beginning of the stream; the alignment
+		 * adjustment is needed by the dfa unpack code.
+		 */
+		size_t sz = blob - (char *) e->start -
+			((e->pos - e->start) & 7);
+		size_t pad = ALIGN(sz, 8) - sz;
+		int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
+			TO_ACCEPT2_FLAG(YYTD_DATA32);
+
+		if (aa_g_paranoid_load)
+			flags |= DFA_FLAG_VERIFY_STATES;
+
+		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
+
+		if (IS_ERR(dfa))
+			return dfa;
+
+		if (!verify_accept(dfa, flags))
+			goto fail;
+	}
+
+	return dfa;
+
+fail:
+	aa_put_dfa(dfa);
+	return ERR_PTR(-EPROTO);
+}
+
+/**
+ * unpack_trans_table - unpack a profile transition table
+ * @e: serialized data extent information  (NOT NULL)
+ * @profile: profile to add the accept table to (NOT NULL)
+ *
+ * Returns: 1 if table successfully unpacked
+ */
+static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+{
+	void *pos = e->pos;
+
+	/* exec table is optional */
+	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
+		int i, size;
+
+		size = unpack_array(e, NULL);
+		/* currently 4 exec bits, and entries 0-3 are reserved (iupcx) */
+		if (size > 16 - 4)
+			goto fail;
+		profile->file.trans.table = kzalloc(sizeof(char *) * size,
+						    GFP_KERNEL);
+		if (!profile->file.trans.table)
+			goto fail;
+
+		profile->file.trans.size = size;
+		for (i = 0; i < size; i++) {
+			char *str;
+			int c, j, size2 = unpack_strdup(e, &str, NULL);
+			/* unpack_strdup verifies that the last character is
+			 * a null termination byte.
+			 */
+			if (!size2)
+				goto fail;
+			profile->file.trans.table[i] = str;
+			/* verify that name doesn't start with space */
+			if (isspace(*str))
+				goto fail;
+
+			/* count the # of internal \0 bytes */
+			for (c = j = 0; j < size2 - 2; j++) {
+				if (!str[j])
+					c++;
+			}
+			if (*str == ':') {
+				/* beginning with : requires an embedded \0;
+				 * verify that exactly 1 internal \0 exists.
+				 * The trailing \0 was already verified by
+				 * unpack_strdup.
+				 */
+				if (c != 1)
+					goto fail;
+				/* first character after : must be valid */
+				if (!str[1])
+					goto fail;
+			} else if (c)
+				/* fail - all other cases with embedded \0 */
+				goto fail;
+		}
+		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
+			goto fail;
+		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+			goto fail;
+	}
+	return 1;
+
+fail:
+	aa_free_domain_entries(&profile->file.trans);
+	e->pos = pos;
+	return 0;
+}
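+
+/*
+ * Illustrative xtable entries satisfying the checks above (assumed
+ * examples, not taken from shipped policy):
+ *
+ *   "/bin/foo\0"      - plain target name, no internal \0 allowed
+ *   ":ns\0profile\0"  - begins with ':' so it must contain exactly one
+ *                       internal \0, here separating the two name
+ *                       components
+ */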
+
+static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
+{
+	void *pos = e->pos;
+
+	/* rlimits are optional */
+	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
+		int i, size;
+		u32 tmp = 0;
+		if (!unpack_u32(e, &tmp, NULL))
+			goto fail;
+		profile->rlimits.mask = tmp;
+
+		size = unpack_array(e, NULL);
+		if (size > RLIM_NLIMITS)
+			goto fail;
+		for (i = 0; i < size; i++) {
+			u64 tmp2 = 0;
+			int a = aa_map_resource(i);
+			if (!unpack_u64(e, &tmp2, NULL))
+				goto fail;
+			profile->rlimits.limits[a].rlim_max = tmp2;
+		}
+		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
+			goto fail;
+		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+			goto fail;
+	}
+	return 1;
+
+fail:
+	e->pos = pos;
+	return 0;
+}
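+
+/*
+ * Sketch of the serialized rlimits element consumed above (field order
+ * from the code; the values are assumed):
+ *
+ *   AA_STRUCT "rlimits"
+ *     AA_U32   mask of the rlimits the profile controls
+ *     AA_ARRAY of <count> AA_U64 rlim_max values, indexed by policy
+ *              resource number and mapped via aa_map_resource()
+ *     AA_ARRAYEND
+ *   AA_STRUCTEND
+ */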
+
+/**
+ * unpack_profile - unpack a serialized profile
+ * @e: serialized data extent information (NOT NULL)
+ *
+ * NOTE: unpack_profile sets the audit struct if there is a failure
+ */
+static struct aa_profile *unpack_profile(struct aa_ext *e)
+{
+	struct aa_profile *profile = NULL;
+	const char *name = NULL;
+	int i, error = -EPROTO;
+	kernel_cap_t tmpcap;
+	u32 tmp;
+
+	/* check that we have the right struct being passed */
+	if (!unpack_nameX(e, AA_STRUCT, "profile"))
+		goto fail;
+	if (!unpack_str(e, &name, NULL))
+		goto fail;
+
+	profile = aa_alloc_profile(name);
+	if (!profile)
+		return ERR_PTR(-ENOMEM);
+
+	/* profile renaming is optional */
+	(void) unpack_str(e, &profile->rename, "rename");
+
+	/* attachment string is optional */
+	(void) unpack_str(e, &profile->attach, "attach");
+
+	/* xmatch is optional and may be NULL */
+	profile->xmatch = unpack_dfa(e);
+	if (IS_ERR(profile->xmatch)) {
+		error = PTR_ERR(profile->xmatch);
+		profile->xmatch = NULL;
+		goto fail;
+	}
+	/* xmatch_len is not optional if xmatch is set */
+	if (profile->xmatch) {
+		if (!unpack_u32(e, &tmp, NULL))
+			goto fail;
+		profile->xmatch_len = tmp;
+	}
+
+	/* per profile debug flags (complain, audit) */
+	if (!unpack_nameX(e, AA_STRUCT, "flags"))
+		goto fail;
+	if (!unpack_u32(e, &tmp, NULL))
+		goto fail;
+	if (tmp & PACKED_FLAG_HAT)
+		profile->flags |= PFLAG_HAT;
+	if (!unpack_u32(e, &tmp, NULL))
+		goto fail;
+	if (tmp == PACKED_MODE_COMPLAIN)
+		profile->mode = APPARMOR_COMPLAIN;
+	else if (tmp == PACKED_MODE_KILL)
+		profile->mode = APPARMOR_KILL;
+	else if (tmp == PACKED_MODE_UNCONFINED)
+		profile->mode = APPARMOR_UNCONFINED;
+	if (!unpack_u32(e, &tmp, NULL))
+		goto fail;
+	if (tmp)
+		profile->audit = AUDIT_ALL;
+
+	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+		goto fail;
+
+	/* path_flags is optional */
+	if (unpack_u32(e, &profile->path_flags, "path_flags"))
+		profile->path_flags |= profile->flags & PFLAG_MEDIATE_DELETED;
+	else
+		/* set a default value if path_flags field is not present */
+		profile->path_flags = PFLAG_MEDIATE_DELETED;
+
+	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
+		goto fail;
+	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
+		goto fail;
+	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
+		goto fail;
+	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
+		goto fail;
+
+	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
+		/* optional upper half of 64 bit caps */
+		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
+			goto fail;
+		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
+			goto fail;
+		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
+			goto fail;
+		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
+			goto fail;
+		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+			goto fail;
+	}
+
+	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
+		/* optional extended caps mediation mask */
+		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
+			goto fail;
+		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
+			goto fail;
+		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+			goto fail;
+	}
+
+	if (!unpack_rlimits(e, profile))
+		goto fail;
+
+	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
+		/* generic policy dfa - optional and may be NULL */
+		profile->policy.dfa = unpack_dfa(e);
+		if (IS_ERR(profile->policy.dfa)) {
+			error = PTR_ERR(profile->policy.dfa);
+			profile->policy.dfa = NULL;
+			goto fail;
+		}
+		if (!unpack_u32(e, &profile->policy.start[0], "start"))
+			/* default start state */
+			profile->policy.start[0] = DFA_START;
+		/* setup class index */
+		for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
+			profile->policy.start[i] =
+				aa_dfa_next(profile->policy.dfa,
+					    profile->policy.start[0],
+					    i);
+		}
+		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+			goto fail;
+	}
+
+	/* get file rules */
+	profile->file.dfa = unpack_dfa(e);
+	if (IS_ERR(profile->file.dfa)) {
+		error = PTR_ERR(profile->file.dfa);
+		profile->file.dfa = NULL;
+		goto fail;
+	}
+
+	if (!unpack_u32(e, &profile->file.start, "dfa_start"))
+		/* default start state */
+		profile->file.start = DFA_START;
+
+	if (!unpack_trans_table(e, profile))
+		goto fail;
+
+	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+		goto fail;
+
+	return profile;
+
+fail:
+	if (profile)
+		name = NULL;
+	else if (!name)
+		name = "unknown";
+	audit_iface(profile, name, "failed to unpack profile", e, error);
+	aa_free_profile(profile);
+
+	return ERR_PTR(error);
+}
+
+/**
+ * verify_header - unpack serialized stream header
+ * @e: serialized data read head (NOT NULL)
+ * @required: whether the header is required or optional
+ * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
+ *
+ * Returns: error or 0 if header is good
+ */
+static int verify_header(struct aa_ext *e, int required, const char **ns)
+{
+	int error = -EPROTONOSUPPORT;
+	const char *name = NULL;
+	*ns = NULL;
+
+	/* get the interface version */
+	if (!unpack_u32(e, &e->version, "version")) {
+		if (required) {
+			audit_iface(NULL, NULL, "invalid profile format", e,
+				    error);
+			return error;
+		}
+
+		/* check that the interface version is currently supported */
+		if (e->version != 5) {
+			audit_iface(NULL, NULL, "unsupported interface version",
+				    e, error);
+			return error;
+		}
+	}
+
+	/* read the namespace if present */
+	if (unpack_str(e, &name, "namespace")) {
+		if (*ns && strcmp(*ns, name))
+			audit_iface(NULL, NULL, "invalid ns change", e, error);
+		else if (!*ns)
+			*ns = name;
+	}
+
+	return 0;
+}
+
+static bool verify_xindex(int xindex, int table_size)
+{
+	int index, xtype;
+	xtype = xindex & AA_X_TYPE_MASK;
+	index = xindex & AA_X_INDEX_MASK;
+	if (xtype == AA_X_TABLE && index > table_size)
+		return 0;
+	return 1;
+}
+
+/* verify dfa xindexes are in range of transition tables */
+static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
+{
+	int i;
+	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
+		if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
+			return 0;
+		if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
+			return 0;
+	}
+	return 1;
+}
+
+/**
+ * verify_profile - Do post unpack analysis to verify profile consistency
+ * @profile: profile to verify (NOT NULL)
+ *
+ * Returns: 0 if passes verification else error
+ */
+static int verify_profile(struct aa_profile *profile)
+{
+	if (aa_g_paranoid_load) {
+		if (profile->file.dfa &&
+		    !verify_dfa_xindex(profile->file.dfa,
+				       profile->file.trans.size)) {
+			audit_iface(profile, NULL, "Invalid named transition",
+				    NULL, -EPROTO);
+			return -EPROTO;
+		}
+	}
+
+	return 0;
+}
+
+void aa_load_ent_free(struct aa_load_ent *ent)
+{
+	if (ent) {
+		aa_put_profile(ent->rename);
+		aa_put_profile(ent->old);
+		aa_put_profile(ent->new);
+		kzfree(ent);
+	}
+}
+
+struct aa_load_ent *aa_load_ent_alloc(void)
+{
+	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+	if (ent)
+		INIT_LIST_HEAD(&ent->list);
+	return ent;
+}
+
+/**
+ * aa_unpack - unpack packed binary profile(s) data loaded from user space
+ * @udata: user data copied to kmem  (NOT NULL)
+ * @size: the size of the user data
+ * @lh: list to place unpacked profiles in (an aa_repl_ws)
+ * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
+ *
+ * Unpack user data and return refcounted allocated profile(s) stored in
+ * @lh in order of discovery, with the list chain stored in base.list,
+ * or an error
+ *
+ * Returns: profile(s) on @lh else error pointer if fails to unpack
+ */
+int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns)
+{
+	struct aa_load_ent *tmp, *ent;
+	struct aa_profile *profile = NULL;
+	int error;
+	struct aa_ext e = {
+		.start = udata,
+		.end = udata + size,
+		.pos = udata,
+	};
+
+	*ns = NULL;
+	while (e.pos < e.end) {
+		void *start;
+		error = verify_header(&e, e.pos == e.start, ns);
+		if (error)
+			goto fail;
+
+		start = e.pos;
+		profile = unpack_profile(&e);
+		if (IS_ERR(profile)) {
+			error = PTR_ERR(profile);
+			goto fail;
+		}
+
+		error = verify_profile(profile);
+		if (error)
+			goto fail_profile;
+
+		error = aa_calc_profile_hash(profile, e.version, start,
+					     e.pos - start);
+		if (error)
+			goto fail_profile;
+
+		ent = aa_load_ent_alloc();
+		if (!ent) {
+			error = -ENOMEM;
+			goto fail_profile;
+		}
+
+		ent->new = profile;
+		list_add_tail(&ent->list, lh);
+	}
+
+	return 0;
+
+fail_profile:
+	aa_put_profile(profile);
+
+fail:
+	list_for_each_entry_safe(ent, tmp, lh, list) {
+		list_del_init(&ent->list);
+		aa_load_ent_free(ent);
+	}
+
+	return error;
+}
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
new file mode 100644
index 0000000..b125acc
--- /dev/null
+++ b/security/apparmor/procattr.c
@@ -0,0 +1,165 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /proc/<pid>/attr/ interface functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include "include/apparmor.h"
+#include "include/context.h"
+#include "include/policy.h"
+#include "include/domain.h"
+#include "include/procattr.h"
+
+
+/**
+ * aa_getprocattr - Return the profile information for @profile
+ * @profile: the profile to print profile info about  (NOT NULL)
+ * @string: Returns - string containing the profile info (NOT NULL)
+ *
+ * Requires: profile != NULL
+ *
+ * Creates a string containing the namespace_name://profile_name for
+ * @profile.
+ *
+ * Returns: size of string placed in @string else error code on failure
+ */
+int aa_getprocattr(struct aa_profile *profile, char **string)
+{
+	char *str;
+	int len = 0, mode_len = 0, ns_len = 0, name_len;
+	const char *mode_str = aa_profile_mode_names[profile->mode];
+	const char *ns_name = NULL;
+	struct aa_namespace *ns = profile->ns;
+	struct aa_namespace *current_ns = __aa_current_profile()->ns;
+	char *s;
+
+	if (!aa_ns_visible(current_ns, ns))
+		return -EACCES;
+
+	ns_name = aa_ns_name(current_ns, ns);
+	ns_len = strlen(ns_name);
+
+	/* if the visible ns_name is non-empty, increase size for the
+	 * ":" and "://" separators
+	 */
+	if (ns_len)
+		ns_len += 4;
+
+	/* unconfined profiles don't have a mode string appended */
+	if (!unconfined(profile))
+		mode_len = strlen(mode_str) + 3;	/* + 3 for _() */
+
+	name_len = strlen(profile->base.hname);
+	len = mode_len + ns_len + name_len + 1;	    /* + 1 for \n */
+	s = str = kmalloc(len + 1, GFP_KERNEL);	    /* + 1 \0 */
+	if (!str)
+		return -ENOMEM;
+
+	if (ns_len) {
+		/* skip over prefix current_ns->base.hname and separating // */
+		sprintf(s, ":%s://", ns_name);
+		s += ns_len;
+	}
+	if (unconfined(profile))
+		/* mode string not being appended */
+		sprintf(s, "%s\n", profile->base.hname);
+	else
+		sprintf(s, "%s (%s)\n", profile->base.hname, mode_str);
+	*string = str;
+
+	/* NOTE: len does not include \0 of string, not saved as part of file */
+	return len;
+}
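+
+/*
+ * Example output (names assumed): a task confined by profile "bar" in
+ * visible child namespace "foo" reads back ":foo://bar (enforce)\n",
+ * while an unconfined task in the current namespace reads
+ * "unconfined\n".
+ */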
+
+/**
+ * split_token_from_name - separate a string of form  <token>^<name>
+ * @op: operation being checked
+ * @args: string to parse  (NOT NULL)
+ * @token: stores returned parsed token value  (NOT NULL)
+ *
+ * Returns: start position of name after token, NULL if no name follows,
+ * else ERR_PTR on failure
+ */
+static char *split_token_from_name(int op, char *args, u64 * token)
+{
+	char *name;
+
+	*token = simple_strtoull(args, &name, 16);
+	if ((name == args) || *name != '^') {
+		AA_ERROR("%s: Invalid input '%s'", op_table[op], args);
+		return ERR_PTR(-EINVAL);
+	}
+
+	name++;			/* skip ^ */
+	if (!*name)
+		name = NULL;
+	return name;
+}
+
+/**
+ * aa_setprocattr_changehat - handle procattr interface to change_hat
+ * @args: args received from writing to /proc/<pid>/attr/current (NOT NULL)
+ * @size: size of the args
+ * @test: true if this is a test of change_hat permissions
+ *
+ * Returns: %0 or error code if change_hat fails
+ */
+int aa_setprocattr_changehat(char *args, size_t size, int test)
+{
+	char *hat;
+	u64 token;
+	const char *hats[16];		/* current hard limit on # of names */
+	int count = 0;
+
+	hat = split_token_from_name(OP_CHANGE_HAT, args, &token);
+	if (IS_ERR(hat))
+		return PTR_ERR(hat);
+
+	if (!hat && !token) {
+		AA_ERROR("change_hat: Invalid input, NULL hat and NULL magic");
+		return -EINVAL;
+	}
+
+	if (hat) {
+		/* set up hat name vector, args guaranteed null terminated
+		 * at args[size] by setprocattr.
+		 *
+		 * If there are multiple hat names in the buffer each is
+		 * separated by a \0, i.e. userspace writes them pre-tokenized
+		 */
+		char *end = args + size;
+		for (count = 0; (hat < end) && count < 16; ++count) {
+			char *next = hat + strlen(hat) + 1;
+			hats[count] = hat;
+			hat = next;
+		}
+	}
+
+	AA_DEBUG("%s: Magic 0x%llx Hat '%s'\n",
+		 __func__, token, hat ? hat : NULL);
+
+	return aa_change_hat(hats, count, token, test);
+}
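+
+/*
+ * Example args (values assumed; the leading command keyword has
+ * already been stripped by the procattr write handler):
+ * "3cafe^hat_a\0hat_b" yields token 0x3cafe and the hat vector
+ * { "hat_a", "hat_b" }, while "3cafe^" with no hat names requests a
+ * return from the current hat.
+ */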
+
+/**
+ * aa_setprocattr_changeprofile - handle procattr interface to changeprofile
+ * @fqname: args received from writing to /proc/<pid>/attr/current (NOT NULL)
+ * @onexec: true if change_profile should be delayed until exec
+ * @test: true if this is a test of change_profile permissions
+ *
+ * Returns: %0 or error code if change_profile fails
+ */
+int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test)
+{
+	char *name, *ns_name;
+
+	name = aa_split_fqname(fqname, &ns_name);
+	return aa_change_profile(ns_name, name, onexec, test);
+}
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
new file mode 100644
index 0000000..748bf0c
--- /dev/null
+++ b/security/apparmor/resource.c
@@ -0,0 +1,154 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor resource mediation and attachment
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/audit.h>
+
+#include "include/audit.h"
+#include "include/context.h"
+#include "include/resource.h"
+#include "include/policy.h"
+
+/*
+ * Table of rlimit names: we generate it from resource.h.
+ */
+#include "rlim_names.h"
+
+struct aa_fs_entry aa_fs_entry_rlimit[] = {
+	AA_FS_FILE_STRING("mask", AA_FS_RLIMIT_MASK),
+	{ }
+};
+
+/* audit callback for resource specific fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+	struct common_audit_data *sa = va;
+
+	audit_log_format(ab, " rlimit=%s value=%lu",
+			 rlim_names[sa->aad->rlim.rlim], sa->aad->rlim.max);
+}
+
+/**
+ * audit_resource - audit setting resource limit
+ * @profile: profile being enforced  (NOT NULL)
+ * @resource: rlimit being audited
+ * @value: value being set
+ * @error: error value
+ *
+ * Returns: 0 or sa->error, else another error code on failure
+ */
+static int audit_resource(struct aa_profile *profile, unsigned int resource,
+			  unsigned long value, int error)
+{
+	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
+
+	sa.type = LSM_AUDIT_DATA_NONE;
+	sa.aad = &aad;
+	aad.op = OP_SETRLIMIT,
+	aad.rlim.rlim = resource;
+	aad.rlim.max = value;
+	aad.error = error;
+	return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_KERNEL, &sa,
+			audit_cb);
+}
+
+/**
+ * aa_map_resource - map compiled policy resource to internal #
+ * @resource: flattened policy resource number
+ *
+ * Returns: resource # for the current architecture.
+ *
+ * rlimit resource can vary based on architecture, map the compiled policy
+ * resource # to the internal representation for the architecture.
+ */
+int aa_map_resource(int resource)
+{
+	return rlim_map[resource];
+}
+
+/**
+ * aa_task_setrlimit - test permission to set an rlimit
+ * @profile: profile confining the task  (NOT NULL)
+ * @task: task the resource is being set on
+ * @resource: the resource being set
+ * @new_rlim: the new resource limit  (NOT NULL)
+ *
+ * Control raising the process's hard limit.
+ *
+ * Returns: 0 or error code if setting resource failed
+ */
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+		      unsigned int resource, struct rlimit *new_rlim)
+{
+	struct aa_profile *task_profile;
+	int error = 0;
+
+	rcu_read_lock();
+	task_profile = aa_get_profile(aa_cred_profile(__task_cred(task)));
+	rcu_read_unlock();
+
+	/* TODO: extend resource control to handle other (non current)
+	 * profiles.  AppArmor rules currently have the implicit assumption
+	 * that the task is setting the resource of a task confined with
+	 * the same profile.
+	 */
+	if (profile != task_profile ||
+	    (profile->rlimits.mask & (1 << resource) &&
+	     new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+		error = -EACCES;
+
+	aa_put_profile(task_profile);
+
+	return audit_resource(profile, resource, new_rlim->rlim_max, error);
+}
+
+/**
+ * __aa_transition_rlimits - apply new profile rlimits
+ * @old: old profile on task  (NOT NULL)
+ * @new: new profile with rlimits to apply  (NOT NULL)
+ */
+void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new)
+{
+	unsigned int mask = 0;
+	struct rlimit *rlim, *initrlim;
+	int i;
+
+	/* for any rlimits the old profile controlled, reset the soft limit
+	 * to the lesser of the task's hard limit and the init task's soft
+	 * limit
+	 */
+	if (old->rlimits.mask) {
+		for (i = 0, mask = 1; i < RLIM_NLIMITS; i++, mask <<= 1) {
+			if (old->rlimits.mask & mask) {
+				rlim = current->signal->rlim + i;
+				initrlim = init_task.signal->rlim + i;
+				rlim->rlim_cur = min(rlim->rlim_max,
+						     initrlim->rlim_cur);
+			}
+		}
+	}
+
+	/* set any new hard limits as dictated by the new profile */
+	if (!new->rlimits.mask)
+		return;
+	for (i = 0, mask = 1; i < RLIM_NLIMITS; i++, mask <<= 1) {
+		if (!(new->rlimits.mask & mask))
+			continue;
+
+		rlim = current->signal->rlim + i;
+		rlim->rlim_max = min(rlim->rlim_max,
+				     new->rlimits.limits[i].rlim_max);
+		/* soft limit should not exceed hard limit */
+		rlim->rlim_cur = min(rlim->rlim_cur, rlim->rlim_max);
+	}
+}
diff --git a/security/apparmor/sid.c b/security/apparmor/sid.c
new file mode 100644
index 0000000..f0b34f7
--- /dev/null
+++ b/security/apparmor/sid.c
@@ -0,0 +1,55 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security identifier (sid) manipulation fns
+ *
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ * AppArmor allocates a unique sid for every profile loaded.  If a profile
+ * is replaced it receives the sid of the profile it is replacing.
+ *
+ * The sid value of 0 is invalid.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+#include "include/sid.h"
+
+/* global counter from which sids are allocated */
+static u32 global_sid;
+static DEFINE_SPINLOCK(sid_lock);
+
+/* TODO FIXME: add sid to profile mapping, and sid recycling */
+
+/**
+ * aa_alloc_sid - allocate a new sid for a profile
+ */
+u32 aa_alloc_sid(void)
+{
+	u32 sid;
+
+	/*
+	 * TODO FIXME: sid recycling - part of profile mapping table
+	 */
+	spin_lock(&sid_lock);
+	sid = (++global_sid);
+	spin_unlock(&sid_lock);
+	return sid;
+}
+
+/**
+ * aa_free_sid - free a sid
+ * @sid: sid to free
+ */
+void aa_free_sid(u32 sid)
+{
+	;			/* NOP ATM */
+}
diff --git a/security/commoncap.c b/security/commoncap.c
new file mode 100644
index 0000000..48071ed
--- /dev/null
+++ b/security/commoncap.c
@@ -0,0 +1,1096 @@
+/* Common capabilities, needed by capability.o.
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ */
+
+#include <linux/capability.h>
+#include <linux/audit.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/lsm_hooks.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/ptrace.h>
+#include <linux/xattr.h>
+#include <linux/hugetlb.h>
+#include <linux/mount.h>
+#include <linux/sched.h>
+#include <linux/prctl.h>
+#include <linux/securebits.h>
+#include <linux/user_namespace.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+
+/*
+ * If a non-root user executes a setuid-root binary in
+ * !secure(SECURE_NOROOT) mode, then we raise capabilities.
+ * However if fE is also set, then the intent is for only
+ * the file capabilities to be applied, and the setuid-root
+ * bit is left on either to change the uid (plausible) or
+ * to get full privilege on a kernel without file capabilities
+ * support.  So in that case we do not raise capabilities.
+ *
+ * Warn if that happens, once per boot.
+ */
+static void warn_setuid_and_fcaps_mixed(const char *fname)
+{
+	static int warned;
+	if (!warned) {
+		printk(KERN_INFO "warning: `%s' has both setuid-root and"
+			" effective capabilities. Therefore not raising all"
+			" capabilities.\n", fname);
+		warned = 1;
+	}
+}
+
+/**
+ * cap_capable - Determine whether a task has a particular effective capability
+ * @cred: The credentials to use
+ * @ns:  The user namespace in which we need the capability
+ * @cap: The capability to check for
+ * @audit: Whether to write an audit message or not
+ *
+ * Determine whether the nominated task has the specified capability amongst
+ * its effective set, returning 0 if it does, -ve if it does not.
+ *
+ * NOTE WELL: cap_capable() cannot be used like the kernel's capable()
+ * and has_capability() functions.  That is, it has the reverse semantics:
+ * cap_capable() returns 0 when a task has a capability, but the
+ * kernel's capable() and has_capability() return 1 for this case.
+ */
+int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
+		int cap, int audit)
+{
+	struct user_namespace *ns = targ_ns;
+
+	/* See if cred has the capability in the target user namespace
+	 * by examining the target user namespace and all of the target
+	 * user namespace's parents.
+	 */
+	for (;;) {
+		/* Do we have the necessary capabilities? */
+		if (ns == cred->user_ns)
+			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+
+		/* Have we tried all of the parent namespaces? */
+		if (ns == &init_user_ns)
+			return -EPERM;
+
+		/* 
+		 * The owner of the user namespace in the parent of the
+		 * user namespace has all caps.
+		 */
+		if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
+			return 0;
+
+		/*
+		 * If you have a capability in a parent user ns, then you have
+		 * it over all children user namespaces as well.
+		 */
+		ns = ns->parent;
+	}
+
+	/* We never get here */
+}
+
+/**
+ * cap_settime - Determine whether the current process may set the system clock
+ * @ts: The time to set
+ * @tz: The timezone to set
+ *
+ * Determine whether the current process may set the system clock and timezone
+ * information, returning 0 if permission granted, -ve if denied.
+ */
+int cap_settime(const struct timespec *ts, const struct timezone *tz)
+{
+	if (!capable(CAP_SYS_TIME))
+		return -EPERM;
+	return 0;
+}
+
+/**
+ * cap_ptrace_access_check - Determine whether the current process may access
+ *			   another
+ * @child: The process to be accessed
+ * @mode: The mode of attachment.
+ *
+ * If we are in the same or an ancestor user_ns and have all the target
+ * task's capabilities, then ptrace access is allowed.
+ * If we have the ptrace capability to the target user_ns, then ptrace
+ * access is allowed.
+ * Else denied.
+ *
+ * Determine whether a process may access another, returning 0 if permission
+ * granted, -ve if denied.
+ */
+int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
+{
+	int ret = 0;
+	const struct cred *cred, *child_cred;
+	const kernel_cap_t *caller_caps;
+
+	rcu_read_lock();
+	cred = current_cred();
+	child_cred = __task_cred(child);
+	if (mode & PTRACE_MODE_FSCREDS)
+		caller_caps = &cred->cap_effective;
+	else
+		caller_caps = &cred->cap_permitted;
+	if (cred->user_ns == child_cred->user_ns &&
+	    cap_issubset(child_cred->cap_permitted, *caller_caps))
+		goto out;
+	if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
+		goto out;
+	ret = -EPERM;
+out:
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * cap_ptrace_traceme - Determine whether another process may trace the current
+ * @parent: The task proposed to be the tracer
+ *
+ * If parent is in the same or an ancestor user_ns and has all current's
+ * capabilities, then ptrace access is allowed.
+ * If parent has the ptrace capability to current's user_ns, then ptrace
+ * access is allowed.
+ * Else denied.
+ *
+ * Determine whether the nominated task is permitted to trace the current
+ * process, returning 0 if permission is granted, -ve if denied.
+ */
+int cap_ptrace_traceme(struct task_struct *parent)
+{
+	int ret = 0;
+	const struct cred *cred, *child_cred;
+
+	rcu_read_lock();
+	cred = __task_cred(parent);
+	child_cred = current_cred();
+	if (cred->user_ns == child_cred->user_ns &&
+	    cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
+		goto out;
+	if (has_ns_capability(parent, child_cred->user_ns, CAP_SYS_PTRACE))
+		goto out;
+	ret = -EPERM;
+out:
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * cap_capget - Retrieve a task's capability sets
+ * @target: The task from which to retrieve the capability sets
+ * @effective: The place to record the effective set
+ * @inheritable: The place to record the inheritable set
+ * @permitted: The place to record the permitted set
+ *
+ * This function retrieves the capabilities of the nominated task and returns
+ * them to the caller.
+ */
+int cap_capget(struct task_struct *target, kernel_cap_t *effective,
+	       kernel_cap_t *inheritable, kernel_cap_t *permitted)
+{
+	const struct cred *cred;
+
+	/* Derived from kernel/capability.c:sys_capget. */
+	rcu_read_lock();
+	cred = __task_cred(target);
+	*effective   = cred->cap_effective;
+	*inheritable = cred->cap_inheritable;
+	*permitted   = cred->cap_permitted;
+	rcu_read_unlock();
+	return 0;
+}
+
+/*
+ * Determine whether the inheritable capabilities are limited to the old
+ * permitted set.  Returns 1 if they are limited, 0 if they are not.
+ */
+static inline int cap_inh_is_capped(void)
+{
+
+	/* they are so limited unless the current task has the CAP_SETPCAP
+	 * capability
+	 */
+	if (cap_capable(current_cred(), current_cred()->user_ns,
+			CAP_SETPCAP, SECURITY_CAP_AUDIT) == 0)
+		return 0;
+	return 1;
+}
+
+/**
+ * cap_capset - Validate and apply proposed changes to current's capabilities
+ * @new: The proposed new credentials; alterations should be made here
+ * @old: The current task's current credentials
+ * @effective: A pointer to the proposed new effective capabilities set
+ * @inheritable: A pointer to the proposed new inheritable capabilities set
+ * @permitted: A pointer to the proposed new permitted capabilities set
+ *
+ * This function validates and applies a proposed mass change to the current
+ * process's capability sets.  The changes are made to the proposed new
+ * credentials, and assuming no error, will be committed by the caller of LSM.
+ */
+int cap_capset(struct cred *new,
+	       const struct cred *old,
+	       const kernel_cap_t *effective,
+	       const kernel_cap_t *inheritable,
+	       const kernel_cap_t *permitted)
+{
+	if (cap_inh_is_capped() &&
+	    !cap_issubset(*inheritable,
+			  cap_combine(old->cap_inheritable,
+				      old->cap_permitted)))
+		/* incapable of using this inheritable set */
+		return -EPERM;
+
+	if (!cap_issubset(*inheritable,
+			  cap_combine(old->cap_inheritable,
+				      old->cap_bset)))
+		/* no new pI capabilities outside bounding set */
+		return -EPERM;
+
+	/* verify restrictions on target's new Permitted set */
+	if (!cap_issubset(*permitted, old->cap_permitted))
+		return -EPERM;
+
+	/* verify the _new_Effective_ is a subset of the _new_Permitted_ */
+	if (!cap_issubset(*effective, *permitted))
+		return -EPERM;
+
+	new->cap_effective   = *effective;
+	new->cap_inheritable = *inheritable;
+	new->cap_permitted   = *permitted;
+
+	/*
+	 * Mask off ambient bits that are no longer both permitted and
+	 * inheritable.
+	 */
+	new->cap_ambient = cap_intersect(new->cap_ambient,
+					 cap_intersect(*permitted,
+						       *inheritable));
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EINVAL;
+	return 0;
+}
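+
+/*
+ * Worked example (capability names assumed): a task whose pP contains
+ * only CAP_NET_RAW and which lacks CAP_SETPCAP may set pI to any
+ * subset of pI | pP, so it can add CAP_NET_RAW to pI but not
+ * CAP_SYS_ADMIN; its new pP may only shrink, and pE must remain a
+ * subset of the new pP.
+ */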
+
+/*
+ * Clear proposed capability sets for execve().
+ */
+static inline void bprm_clear_caps(struct linux_binprm *bprm)
+{
+	cap_clear(bprm->cred->cap_permitted);
+	bprm->cap_effective = false;
+}
+
+/**
+ * cap_inode_need_killpriv - Determine if inode change affects privileges
+ * @dentry: The inode/dentry being changed, with the change marked ATTR_KILL_PRIV
+ *
+ * Determine if applying a change marked ATTR_KILL_PRIV to an inode
+ * affects the security markings on that inode, and if it does, whether
+ * inode_killpriv() should be invoked or the change rejected.
+ *
+ * Returns 0 if granted; +ve if granted, but inode_killpriv() is required; and
+ * -ve to deny the change.
+ */
+int cap_inode_need_killpriv(struct dentry *dentry)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	int error;
+
+	if (!inode->i_op->getxattr)
+	       return 0;
+
+	error = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, NULL, 0);
+	if (error <= 0)
+		return 0;
+	return 1;
+}
+
+/**
+ * cap_inode_killpriv - Erase the security markings on an inode
+ * @dentry: The inode/dentry to alter
+ *
+ * Erase the privilege-enhancing security markings on an inode.
+ *
+ * Returns 0 if successful, -ve on error.
+ */
+int cap_inode_killpriv(struct dentry *dentry)
+{
+	struct inode *inode = d_backing_inode(dentry);
+
+	if (!inode->i_op->removexattr)
+	       return 0;
+
+	return inode->i_op->removexattr(dentry, XATTR_NAME_CAPS);
+}
+
+/*
+ * Calculate the new process capability sets from the capability sets attached
+ * to a file.
+ */
+static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
+					  struct linux_binprm *bprm,
+					  bool *effective,
+					  bool *has_cap)
+{
+	struct cred *new = bprm->cred;
+	unsigned i;
+	int ret = 0;
+
+	if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
+		*effective = true;
+
+	if (caps->magic_etc & VFS_CAP_REVISION_MASK)
+		*has_cap = true;
+
+	CAP_FOR_EACH_U32(i) {
+		__u32 permitted = caps->permitted.cap[i];
+		__u32 inheritable = caps->inheritable.cap[i];
+
+		/*
+		 * pP' = (X & fP) | (pI & fI)
+		 * The addition of pA' is handled later.
+		 */
+		new->cap_permitted.cap[i] =
+			(new->cap_bset.cap[i] & permitted) |
+			(new->cap_inheritable.cap[i] & inheritable);
+
+		if (permitted & ~new->cap_permitted.cap[i])
+			/* insufficient to execute correctly */
+			ret = -EPERM;
+	}
+
+	/*
+	 * For legacy apps, with no internal support for recognizing they
+	 * do not have enough capabilities, we return an error if they are
+	 * missing some "forced" (aka file-permitted) capabilities.
+	 */
+	return *effective ? ret : 0;
+}
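+
+/*
+ * Worked example (values assumed): with a full bounding set X,
+ * fP = { CAP_NET_BIND_SERVICE } and empty pI/fI, the formula
+ * pP' = (X & fP) | (pI & fI) grants exactly CAP_NET_BIND_SERVICE.
+ * Had the bounding set masked that bit out, permitted & ~pP' would be
+ * non-zero and an fE-set binary would fail with -EPERM.
+ */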
+
+/*
+ * Extract the on-exec-apply capability sets for an executable file.
+ */
+int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	__u32 magic_etc;
+	unsigned tocopy, i;
+	int size;
+	struct vfs_cap_data caps;
+
+	memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
+
+	if (!inode || !inode->i_op->getxattr)
+		return -ENODATA;
+
+	size = inode->i_op->getxattr((struct dentry *)dentry, XATTR_NAME_CAPS, &caps,
+				   XATTR_CAPS_SZ);
+	if (size == -ENODATA || size == -EOPNOTSUPP)
+		/* no data, that's ok */
+		return -ENODATA;
+	if (size < 0)
+		return size;
+
+	if (size < sizeof(magic_etc))
+		return -EINVAL;
+
+	cpu_caps->magic_etc = magic_etc = le32_to_cpu(caps.magic_etc);
+
+	switch (magic_etc & VFS_CAP_REVISION_MASK) {
+	case VFS_CAP_REVISION_1:
+		if (size != XATTR_CAPS_SZ_1)
+			return -EINVAL;
+		tocopy = VFS_CAP_U32_1;
+		break;
+	case VFS_CAP_REVISION_2:
+		if (size != XATTR_CAPS_SZ_2)
+			return -EINVAL;
+		tocopy = VFS_CAP_U32_2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	CAP_FOR_EACH_U32(i) {
+		if (i >= tocopy)
+			break;
+		cpu_caps->permitted.cap[i] = le32_to_cpu(caps.data[i].permitted);
+		cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
+	}
+
+	cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+	cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+
+	return 0;
+}
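+
+/*
+ * On-disk xattr layout parsed above (sizes follow the VFS_CAP_*
+ * constants; the flag combination shown is an assumed example):
+ *
+ *   le32 magic_etc                        - revision | VFS_CAP_FLAGS_EFFECTIVE
+ *   { le32 permitted, le32 inheritable }  - x1 for revision 1,
+ *                                           x2 for revision 2
+ */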
+
+/*
+ * Attempt to get the on-exec apply capability sets for an executable file from
+ * its xattrs and, if present, apply them to the proposed credentials being
+ * constructed by execve().
+ */
+static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap)
+{
+	int rc = 0;
+	struct cpu_vfs_cap_data vcaps;
+
+	bprm_clear_caps(bprm);
+
+	if (!file_caps_enabled)
+		return 0;
+
+	if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
+		return 0;
+
+	rc = get_vfs_caps_from_disk(bprm->file->f_path.dentry, &vcaps);
+	if (rc < 0) {
+		if (rc == -EINVAL)
+			printk(KERN_NOTICE "%s: get_vfs_caps_from_disk returned %d for %s\n",
+				__func__, rc, bprm->filename);
+		else if (rc == -ENODATA)
+			rc = 0;
+		goto out;
+	}
+
+	rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap);
+	if (rc == -EINVAL)
+		printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
+		       __func__, rc, bprm->filename);
+
+out:
+	if (rc)
+		bprm_clear_caps(bprm);
+
+	return rc;
+}
+
+/**
+ * cap_bprm_set_creds - Set up the proposed credentials for execve().
+ * @bprm: The execution parameters, including the proposed creds
+ *
+ * Set up the proposed credentials for a new execution context being
+ * constructed by execve().  The proposed creds in @bprm->cred is altered,
+ * which won't take effect immediately.  Returns 0 if successful, -ve on error.
+ */
+int cap_bprm_set_creds(struct linux_binprm *bprm)
+{
+	const struct cred *old = current_cred();
+	struct cred *new = bprm->cred;
+	bool effective, has_cap = false, is_setid;
+	int ret;
+	kuid_t root_uid;
+
+	if (WARN_ON(!cap_ambient_invariant_ok(old)))
+		return -EPERM;
+
+	effective = false;
+	ret = get_file_caps(bprm, &effective, &has_cap);
+	if (ret < 0)
+		return ret;
+
+	root_uid = make_kuid(new->user_ns, 0);
+
+	if (!issecure(SECURE_NOROOT)) {
+		/*
+		 * If the legacy file capability is set, then don't set privs
+		 * for a setuid root binary run by a non-root user.  Do set it
+		 * for a root user just to cause least surprise to an admin.
+		 */
+		if (has_cap && !uid_eq(new->uid, root_uid) && uid_eq(new->euid, root_uid)) {
+			warn_setuid_and_fcaps_mixed(bprm->filename);
+			goto skip;
+		}
+		/*
+		 * To support inheritance of root-permissions and suid-root
+		 * executables under compatibility mode, we override the
+		 * capability sets for the file.
+		 *
+		 * If only the real uid is 0, we do not set the effective bit.
+		 */
+		if (uid_eq(new->euid, root_uid) || uid_eq(new->uid, root_uid)) {
+			/* pP' = (cap_bset & ~0) | (pI & ~0) */
+			new->cap_permitted = cap_combine(old->cap_bset,
+							 old->cap_inheritable);
+		}
+		if (uid_eq(new->euid, root_uid))
+			effective = true;
+	}
+skip:
+
+	/* if we have fs caps, clear dangerous personality flags */
+	if (!cap_issubset(new->cap_permitted, old->cap_permitted))
+		bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+	/* Don't let someone trace a set[ug]id/setpcap binary with the revised
+	 * credentials unless they have the appropriate permit.
+	 *
+	 * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
+	 */
+	is_setid = !uid_eq(new->euid, old->uid) || !gid_eq(new->egid, old->gid);
+
+	if ((is_setid ||
+	     !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
+	    bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
+		/* downgrade; they get no more than they had, and maybe less */
+		if (!capable(CAP_SETUID) ||
+		    (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
+			new->euid = new->uid;
+			new->egid = new->gid;
+		}
+		new->cap_permitted = cap_intersect(new->cap_permitted,
+						   old->cap_permitted);
+	}
+
+	new->suid = new->fsuid = new->euid;
+	new->sgid = new->fsgid = new->egid;
+
+	/* File caps or setid cancels ambient. */
+	if (has_cap || is_setid)
+		cap_clear(new->cap_ambient);
+
+	/*
+	 * Now that we've computed pA', update pP' to give:
+	 *   pP' = (X & fP) | (pI & fI) | pA'
+	 */
+	new->cap_permitted = cap_combine(new->cap_permitted, new->cap_ambient);
+
+	/*
+	 * Set pE' = (fE ? pP' : pA').  Because pA' is zero if fE is set,
+	 * this is the same as pE' = (fE ? pP' : 0) | pA'.
+	 */
+	if (effective)
+		new->cap_effective = new->cap_permitted;
+	else
+		new->cap_effective = new->cap_ambient;
+
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EPERM;
+
+	bprm->cap_effective = effective;
+
+	/*
+	 * Audit candidate if current->cap_effective is set
+	 *
+	 * We do not bother to audit if 3 things are true:
+	 *   1) cap_effective has all caps
+	 *   2) we are root
+	 *   3) root is supposed to have all caps (SECURE_NOROOT)
+	 * Since this is just a normal root execing a process.
+	 *
+	 * Number 1 above might fail if you don't have a full bset, but I think
+	 * that is interesting information to audit.
+	 */
+	if (!cap_issubset(new->cap_effective, new->cap_ambient)) {
+		if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
+		    !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
+		    issecure(SECURE_NOROOT)) {
+			ret = audit_log_bprm_fcaps(bprm, new, old);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
+
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EPERM;
+
+	return 0;
+}
+
+/**
+ * cap_bprm_secureexec - Determine whether a secure execution is required
+ * @bprm: The execution parameters
+ *
+ * Determine whether a secure execution is required, return 1 if it is, and 0
+ * if it is not.
+ *
+ * The credentials have been committed by this point, and so are no longer
+ * available through @bprm->cred.
+ */
+int cap_bprm_secureexec(struct linux_binprm *bprm)
+{
+	const struct cred *cred = current_cred();
+	kuid_t root_uid = make_kuid(cred->user_ns, 0);
+
+	if (!uid_eq(cred->uid, root_uid)) {
+		if (bprm->cap_effective)
+			return 1;
+		if (!cap_issubset(cred->cap_permitted, cred->cap_ambient))
+			return 1;
+	}
+
+	return (!uid_eq(cred->euid, cred->uid) ||
+		!gid_eq(cred->egid, cred->gid));
+}
+
+/**
+ * cap_inode_setxattr - Determine whether an xattr may be altered
+ * @dentry: The inode/dentry being altered
+ * @name: The name of the xattr to be changed
+ * @value: The value that the xattr will be changed to
+ * @size: The size of value
+ * @flags: The replacement flag
+ *
+ * Determine whether an xattr may be altered or set on an inode, returning 0 if
+ * permission is granted, -ve if denied.
+ *
+ * This is used to make sure security xattrs don't get updated or set by those
+ * who aren't privileged to do so.
+ */
+int cap_inode_setxattr(struct dentry *dentry, const char *name,
+		       const void *value, size_t size, int flags)
+{
+	if (!strcmp(name, XATTR_NAME_CAPS)) {
+		if (!capable(CAP_SETFCAP))
+			return -EPERM;
+		return 0;
+	}
+
+	if (!strncmp(name, XATTR_SECURITY_PREFIX,
+		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
+	    !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	return 0;
+}
+
+/**
+ * cap_inode_removexattr - Determine whether an xattr may be removed
+ * @dentry: The inode/dentry being altered
+ * @name: The name of the xattr to be changed
+ *
+ * Determine whether an xattr may be removed from an inode, returning 0 if
+ * permission is granted, -ve if denied.
+ *
+ * This is used to make sure security xattrs don't get removed by those who
+ * aren't privileged to remove them.
+ */
+int cap_inode_removexattr(struct dentry *dentry, const char *name)
+{
+	if (!strcmp(name, XATTR_NAME_CAPS)) {
+		if (!capable(CAP_SETFCAP))
+			return -EPERM;
+		return 0;
+	}
+
+	if (!strncmp(name, XATTR_SECURITY_PREFIX,
+		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
+	    !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	return 0;
+}
+
+/*
+ * cap_emulate_setxuid() fixes the effective / permitted capabilities of
+ * a process after a call to setuid, setreuid, or setresuid.
+ *
+ *  1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of
+ *  {r,e,s}uid != 0, the permitted and effective capabilities are
+ *  cleared.
+ *
+ *  2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective
+ *  capabilities of the process are cleared.
+ *
+ *  3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective
+ *  capabilities are set to the permitted capabilities.
+ *
+ *  fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should
+ *  never happen.
+ *
+ *  -astor
+ *
+ * cevans - New behaviour, Oct '99
+ * A process may, via prctl(), elect to keep its capabilities when it
+ * calls setuid() and switches away from uid==0. Both permitted and
+ * effective sets will be retained.
+ * Without this change, it was impossible for a daemon to drop only some
+ * of its privilege. The call to setuid(!=0) would drop all privileges!
+ * Keeping uid 0 is not an option because uid 0 owns too many vital
+ * files..
+ * Thanks to Olaf Kirch and Peter Benie for spotting this.
+ */
+static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
+{
+	kuid_t root_uid = make_kuid(old->user_ns, 0);
+
+	if ((uid_eq(old->uid, root_uid) ||
+	     uid_eq(old->euid, root_uid) ||
+	     uid_eq(old->suid, root_uid)) &&
+	    (!uid_eq(new->uid, root_uid) &&
+	     !uid_eq(new->euid, root_uid) &&
+	     !uid_eq(new->suid, root_uid))) {
+		if (!issecure(SECURE_KEEP_CAPS)) {
+			cap_clear(new->cap_permitted);
+			cap_clear(new->cap_effective);
+		}
+
+		/*
+		 * Pre-ambient programs expect setresuid to nonroot followed
+		 * by exec to drop capabilities.  We should make sure that
+		 * this remains the case.
+		 */
+		cap_clear(new->cap_ambient);
+	}
+	if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
+		cap_clear(new->cap_effective);
+	if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid))
+		new->cap_effective = new->cap_permitted;
+}
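+
+/*
+ * Example (assumed uid): a root daemon calling setresuid(1000, 1000,
+ * 1000) without SECURE_KEEP_CAPS set hits rule 1 above, losing its
+ * permitted, effective, and ambient sets, so a subsequent exec cannot
+ * silently regain privilege.
+ */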
+
+/**
+ * cap_task_fix_setuid - Fix up the results of setuid() call
+ * @new: The proposed credentials
+ * @old: The current task's current credentials
+ * @flags: Indications of what has changed
+ *
+ * Fix up the results of setuid() call before the credential changes are
+ * actually applied, returning 0 to grant the changes, -ve to deny them.
+ */
+int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
+{
+	switch (flags) {
+	case LSM_SETID_RE:
+	case LSM_SETID_ID:
+	case LSM_SETID_RES:
+		/* juggle the capabilities to follow [RES]UID changes unless
+		 * otherwise suppressed */
+		if (!issecure(SECURE_NO_SETUID_FIXUP))
+			cap_emulate_setxuid(new, old);
+		break;
+
+	case LSM_SETID_FS:
+		/* juggle the capabilities to follow FSUID changes, unless
+		 * otherwise suppressed
+		 *
+		 * FIXME - is fsuser used for all CAP_FS_MASK capabilities?
+		 *          if not, we might be a bit too harsh here.
+		 */
+		if (!issecure(SECURE_NO_SETUID_FIXUP)) {
+			kuid_t root_uid = make_kuid(old->user_ns, 0);
+			if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid))
+				new->cap_effective =
+					cap_drop_fs_set(new->cap_effective);
+
+			if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid))
+				new->cap_effective =
+					cap_raise_fs_set(new->cap_effective,
+							 new->cap_permitted);
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Rationale: code calling task_setscheduler, task_setioprio, and
+ * task_setnice, assumes that
+ *   . if capable(cap_sys_nice), then those actions should be allowed
+ *   . if not capable(cap_sys_nice), but acting on your own processes,
+ *   	then those actions should be allowed
+ * This is insufficient now since you can call code without suid, yet
+ * with increased caps.
+ * So we check for increased caps on the target process.
+ */
+static int cap_safe_nice(struct task_struct *p)
+{
+	int is_subset, ret = 0;
+
+	rcu_read_lock();
+	is_subset = cap_issubset(__task_cred(p)->cap_permitted,
+				 current_cred()->cap_permitted);
+	if (!is_subset && !ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
+		ret = -EPERM;
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
+ * cap_task_setscheduler - Determine if scheduler policy change is permitted
+ * @p: The task to affect
+ *
+ * Determine if the requested scheduler policy change is permitted for the
+ * specified task, returning 0 if permission is granted, -ve if denied.
+ */
+int cap_task_setscheduler(struct task_struct *p)
+{
+	return cap_safe_nice(p);
+}
+
+/**
+ * cap_task_setioprio - Determine if I/O priority change is permitted
+ * @p: The task to affect
+ * @ioprio: The I/O priority to set
+ *
+ * Determine if the requested I/O priority change is permitted for the specified
+ * task, returning 0 if permission is granted, -ve if denied.
+ */
+int cap_task_setioprio(struct task_struct *p, int ioprio)
+{
+	return cap_safe_nice(p);
+}
+
+/**
+ * cap_task_setnice - Determine if task priority change is permitted
+ * @p: The task to affect
+ * @nice: The nice value to set
+ *
+ * Determine if the requested task priority change is permitted for the
+ * specified task, returning 0 if permission is granted, -ve if denied.
+ */
+int cap_task_setnice(struct task_struct *p, int nice)
+{
+	return cap_safe_nice(p);
+}
+
+/*
+ * Implement PR_CAPBSET_DROP.  Attempt to remove the specified capability from
+ * the current task's bounding set.  Returns 0 on success, -ve on error.
+ */
+static int cap_prctl_drop(unsigned long cap)
+{
+	struct cred *new;
+
+	if (!ns_capable(current_user_ns(), CAP_SETPCAP))
+		return -EPERM;
+	if (!cap_valid(cap))
+		return -EINVAL;
+
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+	cap_lower(new->cap_bset, cap);
+	return commit_creds(new);
+}
+
+/**
+ * cap_task_prctl - Implement process control functions for this security module
+ * @option: The process control function requested
+ * @arg2, @arg3, @arg4, @arg5: The argument data for this function
+ *
+ * Allow process control functions (sys_prctl()) to alter capabilities; may
+ * also deny access to other functions not otherwise implemented here.
+ *
+ * Returns 0 or +ve on success, -ENOSYS if this function is not implemented
+ * here, other -ve on error.  If -ENOSYS is returned, sys_prctl() and other LSM
+ * modules will consider performing the function.
+ */
+int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+		   unsigned long arg4, unsigned long arg5)
+{
+	const struct cred *old = current_cred();
+	struct cred *new;
+
+	switch (option) {
+	case PR_CAPBSET_READ:
+		if (!cap_valid(arg2))
+			return -EINVAL;
+		return !!cap_raised(old->cap_bset, arg2);
+
+	case PR_CAPBSET_DROP:
+		return cap_prctl_drop(arg2);
+
+	/*
+	 * The next four prctl's remain to assist with transitioning a
+	 * system from legacy UID=0 based privilege (when filesystem
+	 * capabilities are not in use) to a system using filesystem
+	 * capabilities only - as the POSIX.1e draft intended.
+	 *
+	 * Note:
+	 *
+	 *  PR_SET_SECUREBITS =
+	 *      issecure_mask(SECURE_KEEP_CAPS_LOCKED)
+	 *    | issecure_mask(SECURE_NOROOT)
+	 *    | issecure_mask(SECURE_NOROOT_LOCKED)
+	 *    | issecure_mask(SECURE_NO_SETUID_FIXUP)
+	 *    | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
+	 *
+	 * will ensure that the current process and all of its
+	 * children will be locked into a pure
+	 * capability-based-privilege environment.
+	 */
+	case PR_SET_SECUREBITS:
+		if ((((old->securebits & SECURE_ALL_LOCKS) >> 1)
+		     & (old->securebits ^ arg2))			/*[1]*/
+		    || ((old->securebits & SECURE_ALL_LOCKS & ~arg2))	/*[2]*/
+		    || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS))	/*[3]*/
+		    || (cap_capable(current_cred(),
+				    current_cred()->user_ns, CAP_SETPCAP,
+				    SECURITY_CAP_AUDIT) != 0)		/*[4]*/
+			/*
+			 * [1] no changing of bits that are locked
+			 * [2] no unlocking of locks
+			 * [3] no setting of unsupported bits
+			 * [4] doing anything requires privilege (go read about
+			 *     the "sendmail capabilities bug")
+			 */
+		    )
+			/* cannot change a locked bit */
+			return -EPERM;
+
+		new = prepare_creds();
+		if (!new)
+			return -ENOMEM;
+		new->securebits = arg2;
+		return commit_creds(new);
+
+	case PR_GET_SECUREBITS:
+		return old->securebits;
+
+	case PR_GET_KEEPCAPS:
+		return !!issecure(SECURE_KEEP_CAPS);
+
+	case PR_SET_KEEPCAPS:
+		if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
+			return -EINVAL;
+		if (issecure(SECURE_KEEP_CAPS_LOCKED))
+			return -EPERM;
+
+		new = prepare_creds();
+		if (!new)
+			return -ENOMEM;
+		if (arg2)
+			new->securebits |= issecure_mask(SECURE_KEEP_CAPS);
+		else
+			new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
+		return commit_creds(new);
+
+	case PR_CAP_AMBIENT:
+		if (arg2 == PR_CAP_AMBIENT_CLEAR_ALL) {
+			if (arg3 | arg4 | arg5)
+				return -EINVAL;
+
+			new = prepare_creds();
+			if (!new)
+				return -ENOMEM;
+			cap_clear(new->cap_ambient);
+			return commit_creds(new);
+		}
+
+		if (((!cap_valid(arg3)) | arg4 | arg5))
+			return -EINVAL;
+
+		if (arg2 == PR_CAP_AMBIENT_IS_SET) {
+			return !!cap_raised(current_cred()->cap_ambient, arg3);
+		} else if (arg2 != PR_CAP_AMBIENT_RAISE &&
+			   arg2 != PR_CAP_AMBIENT_LOWER) {
+			return -EINVAL;
+		} else {
+			if (arg2 == PR_CAP_AMBIENT_RAISE &&
+			    (!cap_raised(current_cred()->cap_permitted, arg3) ||
+			     !cap_raised(current_cred()->cap_inheritable,
+					 arg3) ||
+			     issecure(SECURE_NO_CAP_AMBIENT_RAISE)))
+				return -EPERM;
+
+			new = prepare_creds();
+			if (!new)
+				return -ENOMEM;
+			if (arg2 == PR_CAP_AMBIENT_RAISE)
+				cap_raise(new->cap_ambient, arg3);
+			else
+				cap_lower(new->cap_ambient, arg3);
+			return commit_creds(new);
+		}
+
+	default:
+		/* No functionality available - continue with default */
+		return -ENOSYS;
+	}
+}
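+
+/*
+ * Illustrative userspace sketch of the ambient capability interface handled
+ * above (assumption: the caller already has CAP_NET_RAW raised in both its
+ * permitted and inheritable sets):
+ *
+ *	#include <sys/prctl.h>
+ *	#include <linux/capability.h>
+ *
+ *	prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_RAW, 0, 0);
+ *	prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_RAW, 0, 0); // 1
+ *	prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0, 0, 0);
+ *
+ * The RAISE call fails with EPERM unless the capability is in both the
+ * permitted and inheritable sets and SECURE_NO_CAP_AMBIENT_RAISE is unset.
+ */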
+
+/**
+ * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
+ * @mm: The VM space in which the new mapping is to be made
+ * @pages: The size of the mapping
+ *
+ * Determine whether the allocation of a new virtual mapping by the current
+ * task is permitted, returning 1 if permission is granted, 0 if not.
+ */
+int cap_vm_enough_memory(struct mm_struct *mm, long pages)
+{
+	int cap_sys_admin = 0;
+
+	if (cap_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN,
+			SECURITY_CAP_NOAUDIT) == 0)
+		cap_sys_admin = 1;
+	return cap_sys_admin;
+}
+
+/*
+ * cap_mmap_addr - check if able to map given addr
+ * @addr: address attempting to be mapped
+ *
+ * If the process is attempting to map memory below dac_mmap_min_addr, it needs
+ * CAP_SYS_RAWIO.  The other parameters to this function are unused by the
+ * capability security module.  Returns 0 if this mapping should be allowed
+ * -EPERM if not.
+ */
+int cap_mmap_addr(unsigned long addr)
+{
+	int ret = 0;
+
+	if (addr < dac_mmap_min_addr) {
+		ret = cap_capable(current_cred(), &init_user_ns, CAP_SYS_RAWIO,
+				  SECURITY_CAP_AUDIT);
+		/* set PF_SUPERPRIV if it turns out we allow the low mmap */
+		if (ret == 0)
+			current->flags |= PF_SUPERPRIV;
+	}
+	return ret;
+}
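+
+/*
+ * Illustrative sketch: a userspace mapping below the configured minimum
+ * address (tunable via the vm.mmap_min_addr sysctl) is what triggers the
+ * CAP_SYS_RAWIO check above.  The address used here is only an example:
+ *
+ *	#include <sys/mman.h>
+ *
+ *	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
+ *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ *	// fails with EPERM for an unprivileged process when 0x1000 is
+ *	// below the minimum mapping address
+ */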
+
+int cap_mmap_file(struct file *file, unsigned long reqprot,
+		  unsigned long prot, unsigned long flags)
+{
+	return 0;
+}
+
+#ifdef CONFIG_SECURITY
+
+struct security_hook_list capability_hooks[] = {
+	LSM_HOOK_INIT(capable, cap_capable),
+	LSM_HOOK_INIT(settime, cap_settime),
+	LSM_HOOK_INIT(ptrace_access_check, cap_ptrace_access_check),
+	LSM_HOOK_INIT(ptrace_traceme, cap_ptrace_traceme),
+	LSM_HOOK_INIT(capget, cap_capget),
+	LSM_HOOK_INIT(capset, cap_capset),
+	LSM_HOOK_INIT(bprm_set_creds, cap_bprm_set_creds),
+	LSM_HOOK_INIT(bprm_secureexec, cap_bprm_secureexec),
+	LSM_HOOK_INIT(inode_need_killpriv, cap_inode_need_killpriv),
+	LSM_HOOK_INIT(inode_killpriv, cap_inode_killpriv),
+	LSM_HOOK_INIT(mmap_addr, cap_mmap_addr),
+	LSM_HOOK_INIT(mmap_file, cap_mmap_file),
+	LSM_HOOK_INIT(task_fix_setuid, cap_task_fix_setuid),
+	LSM_HOOK_INIT(task_prctl, cap_task_prctl),
+	LSM_HOOK_INIT(task_setscheduler, cap_task_setscheduler),
+	LSM_HOOK_INIT(task_setioprio, cap_task_setioprio),
+	LSM_HOOK_INIT(task_setnice, cap_task_setnice),
+	LSM_HOOK_INIT(vm_enough_memory, cap_vm_enough_memory),
+};
+
+void __init capability_add_hooks(void)
+{
+	security_add_hooks(capability_hooks, ARRAY_SIZE(capability_hooks));
+}
+
+#endif /* CONFIG_SECURITY */
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
new file mode 100644
index 0000000..03c1652
--- /dev/null
+++ b/security/device_cgroup.c
@@ -0,0 +1,868 @@
+/*
+ * device_cgroup.c - device cgroup subsystem
+ *
+ * Copyright 2007 IBM Corp
+ */
+
+#include <linux/device_cgroup.h>
+#include <linux/cgroup.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/mutex.h>
+
+#define ACC_MKNOD 1
+#define ACC_READ  2
+#define ACC_WRITE 4
+#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)
+
+#define DEV_BLOCK 1
+#define DEV_CHAR  2
+#define DEV_ALL   4  /* this represents all devices */
+
+static DEFINE_MUTEX(devcgroup_mutex);
+
+enum devcg_behavior {
+	DEVCG_DEFAULT_NONE,
+	DEVCG_DEFAULT_ALLOW,
+	DEVCG_DEFAULT_DENY,
+};
+
+/*
+ * exception list locking rules:
+ * hold devcgroup_mutex for update/read.
+ * hold rcu_read_lock() for read.
+ */
+
+struct dev_exception_item {
+	u32 major, minor;
+	short type;
+	short access;
+	struct list_head list;
+	struct rcu_head rcu;
+};
+
+struct dev_cgroup {
+	struct cgroup_subsys_state css;
+	struct list_head exceptions;
+	enum devcg_behavior behavior;
+};
+
+static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
+{
+	return s ? container_of(s, struct dev_cgroup, css) : NULL;
+}
+
+static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
+{
+	return css_to_devcgroup(task_css(task, devices_cgrp_id));
+}
+
+/*
+ * called under devcgroup_mutex
+ */
+static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
+{
+	struct dev_exception_item *ex, *tmp, *new;
+
+	lockdep_assert_held(&devcgroup_mutex);
+
+	list_for_each_entry(ex, orig, list) {
+		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
+		if (!new)
+			goto free_and_exit;
+		list_add_tail(&new->list, dest);
+	}
+
+	return 0;
+
+free_and_exit:
+	list_for_each_entry_safe(ex, tmp, dest, list) {
+		list_del(&ex->list);
+		kfree(ex);
+	}
+	return -ENOMEM;
+}
+
+/*
+ * called under devcgroup_mutex
+ */
+static int dev_exception_add(struct dev_cgroup *dev_cgroup,
+			     struct dev_exception_item *ex)
+{
+	struct dev_exception_item *excopy, *walk;
+
+	lockdep_assert_held(&devcgroup_mutex);
+
+	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
+	if (!excopy)
+		return -ENOMEM;
+
+	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
+		if (walk->type != ex->type)
+			continue;
+		if (walk->major != ex->major)
+			continue;
+		if (walk->minor != ex->minor)
+			continue;
+
+		walk->access |= ex->access;
+		kfree(excopy);
+		excopy = NULL;
+	}
+
+	if (excopy != NULL)
+		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
+	return 0;
+}
+
+/*
+ * called under devcgroup_mutex
+ */
+static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
+			     struct dev_exception_item *ex)
+{
+	struct dev_exception_item *walk, *tmp;
+
+	lockdep_assert_held(&devcgroup_mutex);
+
+	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
+		if (walk->type != ex->type)
+			continue;
+		if (walk->major != ex->major)
+			continue;
+		if (walk->minor != ex->minor)
+			continue;
+
+		walk->access &= ~ex->access;
+		if (!walk->access) {
+			list_del_rcu(&walk->list);
+			kfree_rcu(walk, rcu);
+		}
+	}
+}
+
+static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
+{
+	struct dev_exception_item *ex, *tmp;
+
+	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
+		list_del_rcu(&ex->list);
+		kfree_rcu(ex, rcu);
+	}
+}
+
+/**
+ * dev_exception_clean - frees all entries of the exception list
+ * @dev_cgroup: dev_cgroup with the exception list to be cleaned
+ *
+ * called under devcgroup_mutex
+ */
+static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
+{
+	lockdep_assert_held(&devcgroup_mutex);
+
+	__dev_exception_clean(dev_cgroup);
+}
+
+static inline bool is_devcg_online(const struct dev_cgroup *devcg)
+{
+	return (devcg->behavior != DEVCG_DEFAULT_NONE);
+}
+
+/**
+ * devcgroup_online - initializes devcgroup's behavior and exceptions based on
+ * 		      parent's
+ * @css: css getting online
+ * returns 0 in case of success, error code otherwise
+ */
+static int devcgroup_online(struct cgroup_subsys_state *css)
+{
+	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
+	int ret = 0;
+
+	mutex_lock(&devcgroup_mutex);
+
+	if (parent_dev_cgroup == NULL)
+		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
+	else {
+		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
+					  &parent_dev_cgroup->exceptions);
+		if (!ret)
+			dev_cgroup->behavior = parent_dev_cgroup->behavior;
+	}
+	mutex_unlock(&devcgroup_mutex);
+
+	return ret;
+}
+
+static void devcgroup_offline(struct cgroup_subsys_state *css)
+{
+	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+
+	mutex_lock(&devcgroup_mutex);
+	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
+	mutex_unlock(&devcgroup_mutex);
+}
+
+/*
+ * called from kernel/cgroup.c with cgroup_lock() held.
+ */
+static struct cgroup_subsys_state *
+devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+	struct dev_cgroup *dev_cgroup;
+
+	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
+	if (!dev_cgroup)
+		return ERR_PTR(-ENOMEM);
+	INIT_LIST_HEAD(&dev_cgroup->exceptions);
+	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
+
+	return &dev_cgroup->css;
+}
+
+static void devcgroup_css_free(struct cgroup_subsys_state *css)
+{
+	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+
+	__dev_exception_clean(dev_cgroup);
+	kfree(dev_cgroup);
+}
+
+#define DEVCG_ALLOW 1
+#define DEVCG_DENY 2
+#define DEVCG_LIST 3
+
+#define MAJMINLEN 13
+#define ACCLEN 4
+
+static void set_access(char *acc, short access)
+{
+	int idx = 0;
+	memset(acc, 0, ACCLEN);
+	if (access & ACC_READ)
+		acc[idx++] = 'r';
+	if (access & ACC_WRITE)
+		acc[idx++] = 'w';
+	if (access & ACC_MKNOD)
+		acc[idx++] = 'm';
+}
+
+static char type_to_char(short type)
+{
+	if (type == DEV_ALL)
+		return 'a';
+	if (type == DEV_CHAR)
+		return 'c';
+	if (type == DEV_BLOCK)
+		return 'b';
+	return 'X';
+}
+
+static void set_majmin(char *str, unsigned m)
+{
+	if (m == ~0)
+		strcpy(str, "*");
+	else
+		sprintf(str, "%u", m);
+}
+
+static int devcgroup_seq_show(struct seq_file *m, void *v)
+{
+	struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
+	struct dev_exception_item *ex;
+	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
+
+	rcu_read_lock();
+	/*
+	 * To preserve the compatibility:
+	 * - Only show the "all devices" when the default policy is to allow
+	 * - List the exceptions in case the default policy is to deny
+	 * This way, the file remains as a "whitelist of devices"
+	 */
+	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+		set_access(acc, ACC_MASK);
+		set_majmin(maj, ~0);
+		set_majmin(min, ~0);
+		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
+			   maj, min, acc);
+	} else {
+		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
+			set_access(acc, ex->access);
+			set_majmin(maj, ex->major);
+			set_majmin(min, ex->minor);
+			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
+				   maj, min, acc);
+		}
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
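+
+/*
+ * Illustrative "devices.list" output produced by the function above.  With
+ * the default policy set to allow, only the catch-all entry is shown:
+ *
+ *	a *:* rwm
+ *
+ * With the default policy set to deny, the exceptions are listed instead:
+ *
+ *	c 1:3 rw
+ *	b 8:* m
+ */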
+
+/**
+ * match_exception	- iterates the exception list trying to find a complete match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of provided parameters.
+ *
+ * Return: true in case it matches an exception completely
+ */
+static bool match_exception(struct list_head *exceptions, short type,
+			    u32 major, u32 minor, short access)
+{
+	struct dev_exception_item *ex;
+
+	list_for_each_entry_rcu(ex, exceptions, list) {
+		if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+			continue;
+		if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+			continue;
+		if (ex->major != ~0 && ex->major != major)
+			continue;
+		if (ex->minor != ~0 && ex->minor != minor)
+			continue;
+		/* provided access cannot have more than the exception rule */
+		if (access & (~ex->access))
+			continue;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * match_exception_partial - iterates the exception list trying to find a partial match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden by
+ * any of the exception list.
+ *
+ * Return: true in case the provided range partially matches an exception
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+				    u32 major, u32 minor, short access)
+{
+	struct dev_exception_item *ex;
+
+	list_for_each_entry_rcu(ex, exceptions, list) {
+		if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+			continue;
+		if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+			continue;
+		/*
+		 * We must be sure that both the exception and the provided
+		 * range aren't masking all devices
+		 */
+		if (ex->major != ~0 && major != ~0 && ex->major != major)
+			continue;
+		if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
+			continue;
+		/*
+		 * In order to make sure the provided range isn't matching
+		 * an exception, all its access bits shouldn't match the
+		 * exception's access bits
+		 */
+		if (!(access & ex->access))
+			continue;
+		return true;
+	}
+	return false;
+}
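+
+/*
+ * Worked example of the two matchers above.  Given the exception "c 1:* rw":
+ *
+ *	match_exception("c 1:3 r")	-> true: 1:3 lies entirely inside
+ *					   1:* and "r" is a subset of "rw"
+ *	match_exception("c 1:3 rwm")	-> false: "m" exceeds the rule
+ *	match_exception_partial("c *:3 w") -> true: the ranges overlap on
+ *					   1:3 and share the "w" bit
+ *	match_exception_partial("c *:3 m") -> false: no access bit overlaps
+ */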
+
+/**
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+		          struct dev_exception_item *refex,
+		          enum devcg_behavior behavior)
+{
+	bool match = false;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+			 !lockdep_is_held(&devcgroup_mutex),
+			 "device_cgroup:verify_new_ex called without proper synchronization");
+
+	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+		if (behavior == DEVCG_DEFAULT_ALLOW) {
+			/*
+			 * new exception in the child doesn't matter, only
+			 * adding extra restrictions
+			 */
+			return true;
+		} else {
+			/*
+			 * new exception in the child will add more devices
+			 * that can be accessed, so it can't match any of
+			 * parent's exceptions, even slightly
+			 */
+			match = match_exception_partial(&dev_cgroup->exceptions,
+							refex->type,
+							refex->major,
+							refex->minor,
+							refex->access);
+
+			if (match)
+				return false;
+			return true;
+		}
+	} else {
+		/*
+		 * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
+		 * the new exception will add access to more devices and must
+		 * be contained completely in a parent's exception to be
+		 * allowed
+		 */
+		match = match_exception(&dev_cgroup->exceptions, refex->type,
+					refex->major, refex->minor,
+					refex->access);
+
+		if (match)
+			/* parent has an exception that matches the proposed */
+			return true;
+		else
+			return false;
+	}
+	return false;
+}
+
+/*
+ * parent_has_perm:
+ * when adding a new allow rule to a device exception list, the rule
+ * must be allowed in the parent device
+ */
+static int parent_has_perm(struct dev_cgroup *childcg,
+				  struct dev_exception_item *ex)
+{
+	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
+
+	if (!parent)
+		return 1;
+	return verify_new_ex(parent, ex, childcg->behavior);
+}
+
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in cgroups with default ALLOW policy, it must
+ * be checked if removing it will give the child cgroup more access than the
+ * parent.
+ *
+ * Return: true if it's ok to remove exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+				  struct dev_exception_item *ex)
+{
+	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
+
+	if (!parent)
+		return true;
+
+	/* It's always allowed to remove access to devices */
+	if (childcg->behavior == DEVCG_DEFAULT_DENY)
+		return true;
+
+	/*
+	 * Make sure you're not removing part of or a whole exception existing in
+	 * the parent cgroup
+	 */
+	return !match_exception_partial(&parent->exceptions, ex->type,
+					ex->major, ex->minor, ex->access);
+}
+
+/**
+ * may_allow_all - checks if it's possible to change the behavior to
+ *		   allow based on parent's rules.
+ * @parent: device cgroup's parent
+ * returns: != 0 in case it's allowed, 0 otherwise
+ */
+static inline int may_allow_all(struct dev_cgroup *parent)
+{
+	if (!parent)
+		return 1;
+	return parent->behavior == DEVCG_DEFAULT_ALLOW;
+}
+
+/**
+ * revalidate_active_exceptions - walks through the active exception list and
+ * 				  revalidates the exceptions based on parent's
+ * 				  behavior and exceptions. The exceptions that
+ * 				  are no longer valid will be removed.
+ * 				  Called with devcgroup_mutex held.
+ * @devcg: cgroup whose exceptions will be checked
+ *
+ * This is one of the three key functions for hierarchy implementation.
+ * This function is responsible for re-evaluating all the cgroup's active
+ * exceptions due to a parent's exception change.
+ * Refer to Documentation/cgroups/devices.txt for more details.
+ */
+static void revalidate_active_exceptions(struct dev_cgroup *devcg)
+{
+	struct dev_exception_item *ex;
+	struct list_head *this, *tmp;
+
+	list_for_each_safe(this, tmp, &devcg->exceptions) {
+		ex = container_of(this, struct dev_exception_item, list);
+		if (!parent_has_perm(devcg, ex))
+			dev_exception_rm(devcg, ex);
+	}
+}
+
+/**
+ * propagate_exception - propagates a new exception to the children
+ * @devcg_root: device cgroup that added a new exception
+ * @ex: new exception to be propagated
+ *
+ * returns: 0 in case of success, != 0 in case of error
+ */
+static int propagate_exception(struct dev_cgroup *devcg_root,
+			       struct dev_exception_item *ex)
+{
+	struct cgroup_subsys_state *pos;
+	int rc = 0;
+
+	rcu_read_lock();
+
+	css_for_each_descendant_pre(pos, &devcg_root->css) {
+		struct dev_cgroup *devcg = css_to_devcgroup(pos);
+
+		/*
+		 * Because devcgroup_mutex is held, no devcg will become
+		 * online or offline during the tree walk (see on/offline
+		 * methods), and online ones are safe to access outside RCU
+		 * read lock without bumping refcnt.
+		 */
+		if (pos == &devcg_root->css || !is_devcg_online(devcg))
+			continue;
+
+		rcu_read_unlock();
+
+		/*
+		 * in case both root's and devcg's behavior is allow, a new
+		 * restriction means adding to the exception list
+		 */
+		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
+		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
+			rc = dev_exception_add(devcg, ex);
+			if (rc)
+				break;
+		} else {
+			/*
+			 * in the other possible cases:
+			 * root's behavior: allow, devcg's: deny
+			 * root's behavior: deny, devcg's: deny
+			 * the exception will be removed
+			 */
+			dev_exception_rm(devcg, ex);
+		}
+		revalidate_active_exceptions(devcg);
+
+		rcu_read_lock();
+	}
+
+	rcu_read_unlock();
+	return rc;
+}
+
+/*
+ * Modify the exception list using allow/deny rules.
+ * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
+ * so we can give a container CAP_MKNOD to let it create devices but not
+ * modify the exception list.
+ * It seems likely we'll want to add a CAP_CONTAINER capability to allow
+ * us to also grant CAP_SYS_ADMIN to containers without giving away the
+ * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN.
+ *
+ * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
+ * new access is only allowed if you're in the top-level cgroup, or your
+ * parent cgroup has the access you're asking for.
+ */
+static int devcgroup_update_access(struct dev_cgroup *devcgroup,
+				   int filetype, char *buffer)
+{
+	const char *b;
+	char temp[12];		/* 11 + 1 characters needed for a u32 */
+	int count, rc = 0;
+	struct dev_exception_item ex;
+	struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	memset(&ex, 0, sizeof(ex));
+	b = buffer;
+
+	switch (*b) {
+	case 'a':
+		switch (filetype) {
+		case DEVCG_ALLOW:
+			if (css_has_online_children(&devcgroup->css))
+				return -EINVAL;
+
+			if (!may_allow_all(parent))
+				return -EPERM;
+			dev_exception_clean(devcgroup);
+			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+			if (!parent)
+				break;
+
+			rc = dev_exceptions_copy(&devcgroup->exceptions,
+						 &parent->exceptions);
+			if (rc)
+				return rc;
+			break;
+		case DEVCG_DENY:
+			if (css_has_online_children(&devcgroup->css))
+				return -EINVAL;
+
+			dev_exception_clean(devcgroup);
+			devcgroup->behavior = DEVCG_DEFAULT_DENY;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return 0;
+	case 'b':
+		ex.type = DEV_BLOCK;
+		break;
+	case 'c':
+		ex.type = DEV_CHAR;
+		break;
+	default:
+		return -EINVAL;
+	}
+	b++;
+	if (!isspace(*b))
+		return -EINVAL;
+	b++;
+	if (*b == '*') {
+		ex.major = ~0;
+		b++;
+	} else if (isdigit(*b)) {
+		memset(temp, 0, sizeof(temp));
+		for (count = 0; count < sizeof(temp) - 1; count++) {
+			temp[count] = *b;
+			b++;
+			if (!isdigit(*b))
+				break;
+		}
+		rc = kstrtou32(temp, 10, &ex.major);
+		if (rc)
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+	if (*b != ':')
+		return -EINVAL;
+	b++;
+
+	/* read minor */
+	if (*b == '*') {
+		ex.minor = ~0;
+		b++;
+	} else if (isdigit(*b)) {
+		memset(temp, 0, sizeof(temp));
+		for (count = 0; count < sizeof(temp) - 1; count++) {
+			temp[count] = *b;
+			b++;
+			if (!isdigit(*b))
+				break;
+		}
+		rc = kstrtou32(temp, 10, &ex.minor);
+		if (rc)
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+	if (!isspace(*b))
+		return -EINVAL;
+	for (b++, count = 0; count < 3; count++, b++) {
+		switch (*b) {
+		case 'r':
+			ex.access |= ACC_READ;
+			break;
+		case 'w':
+			ex.access |= ACC_WRITE;
+			break;
+		case 'm':
+			ex.access |= ACC_MKNOD;
+			break;
+		case '\n':
+		case '\0':
+			count = 3;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	switch (filetype) {
+	case DEVCG_ALLOW:
+		/*
+		 * If the default policy is to allow by default, try to remove
+		 * a matching exception instead. And be silent about it: we
+		 * don't want to break compatibility
+		 */
+		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+			/* Check if the parent allows removing it first */
+			if (!parent_allows_removal(devcgroup, &ex))
+				return -EPERM;
+			dev_exception_rm(devcgroup, &ex);
+			break;
+		}
+
+		if (!parent_has_perm(devcgroup, &ex))
+			return -EPERM;
+		rc = dev_exception_add(devcgroup, &ex);
+		break;
+	case DEVCG_DENY:
+		/*
+		 * If the default policy is to deny by default, try to remove
+		 * a matching exception instead. And be silent about it: we
+		 * don't want to break compatibility
+		 */
+		if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
+			dev_exception_rm(devcgroup, &ex);
+		else
+			rc = dev_exception_add(devcgroup, &ex);
+
+		if (rc)
+			break;
+		/* we only propagate new restrictions */
+		rc = propagate_exception(devcgroup, &ex);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+	return rc;
+}
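+
+/*
+ * Examples of rule strings accepted by the parser above:
+ *
+ *	"a"		switch the default behavior (allow or deny all)
+ *	"c 1:3 rwm"	char device major 1, minor 3, read/write/mknod
+ *	"b 8:* rw"	block devices with major 8, any minor, read/write
+ */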
+
+static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
+				      char *buf, size_t nbytes, loff_t off)
+{
+	int retval;
+
+	mutex_lock(&devcgroup_mutex);
+	retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
+					 of_cft(of)->private, strstrip(buf));
+	mutex_unlock(&devcgroup_mutex);
+	return retval ?: nbytes;
+}
+
+static struct cftype dev_cgroup_files[] = {
+	{
+		.name = "allow",
+		.write = devcgroup_access_write,
+		.private = DEVCG_ALLOW,
+	},
+	{
+		.name = "deny",
+		.write = devcgroup_access_write,
+		.private = DEVCG_DENY,
+	},
+	{
+		.name = "list",
+		.seq_show = devcgroup_seq_show,
+		.private = DEVCG_LIST,
+	},
+	{ }	/* terminate */
+};
+
+struct cgroup_subsys devices_cgrp_subsys = {
+	.css_alloc = devcgroup_css_alloc,
+	.css_free = devcgroup_css_free,
+	.css_online = devcgroup_online,
+	.css_offline = devcgroup_offline,
+	.legacy_cftypes = dev_cgroup_files,
+};
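+
+/*
+ * Illustrative usage from userspace via the cgroup filesystem (the mount
+ * point below is an assumption; it varies between distributions):
+ *
+ *	echo 'a' > /sys/fs/cgroup/devices/grp/devices.deny
+ *	echo 'c 1:3 rw' > /sys/fs/cgroup/devices/grp/devices.allow
+ *	cat /sys/fs/cgroup/devices/grp/devices.list
+ */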
+
+/**
+ * __devcgroup_check_permission - checks if an inode operation is permitted
+ * @type: device type
+ * @major: device major number
+ * @minor: device minor number
+ * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
+ *
+ * returns 0 on success, -EPERM in case the operation is not permitted
+ */
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+				        short access)
+{
+	struct dev_cgroup *dev_cgroup;
+	bool rc;
+
+	rcu_read_lock();
+	dev_cgroup = task_devcgroup(current);
+	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+		/* Can't match any of the exceptions, even partially */
+		rc = !match_exception_partial(&dev_cgroup->exceptions,
+					      type, major, minor, access);
+	else
+		/* Need to match completely one exception to be allowed */
+		rc = match_exception(&dev_cgroup->exceptions, type, major,
+				     minor, access);
+	rcu_read_unlock();
+
+	if (!rc)
+		return -EPERM;
+
+	return 0;
+}
+
+int __devcgroup_inode_permission(struct inode *inode, int mask)
+{
+	short type, access = 0;
+
+	if (S_ISBLK(inode->i_mode))
+		type = DEV_BLOCK;
+	if (S_ISCHR(inode->i_mode))
+		type = DEV_CHAR;
+	if (mask & MAY_WRITE)
+		access |= ACC_WRITE;
+	if (mask & MAY_READ)
+		access |= ACC_READ;
+
+	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
+			access);
+}
+
+int devcgroup_inode_mknod(int mode, dev_t dev)
+{
+	short type;
+
+	if (!S_ISBLK(mode) && !S_ISCHR(mode))
+		return 0;
+
+	if (S_ISBLK(mode))
+		type = DEV_BLOCK;
+	else
+		type = DEV_CHAR;
+
+	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
+			ACC_MKNOD);
+
+}
diff --git a/security/inode.c b/security/inode.c
new file mode 100644
index 0000000..16622ae
--- /dev/null
+++ b/security/inode.c
@@ -0,0 +1,227 @@
+/*
+ *  inode.c - securityfs
+ *
+ *  Copyright (C) 2005 Greg Kroah-Hartman <gregkh@suse.de>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License version
+ *	2 as published by the Free Software Foundation.
+ *
+ *  Based on fs/debugfs/inode.c which had the following copyright notice:
+ *    Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ *    Copyright (C) 2004 IBM Inc.
+ */
+
+/* #define DEBUG */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/magic.h>
+
+static struct vfsmount *mount;
+static int mount_count;
+
+static int fill_super(struct super_block *sb, void *data, int silent)
+{
+	static struct tree_descr files[] = {{""}};
+
+	return simple_fill_super(sb, SECURITYFS_MAGIC, files);
+}
+
+static struct dentry *get_sb(struct file_system_type *fs_type,
+		  int flags, const char *dev_name,
+		  void *data)
+{
+	return mount_single(fs_type, flags, data, fill_super);
+}
+
+static struct file_system_type fs_type = {
+	.owner =	THIS_MODULE,
+	.name =		"securityfs",
+	.mount =	get_sb,
+	.kill_sb =	kill_litter_super,
+};
+
+/**
+ * securityfs_create_file - create a file in the securityfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is %NULL, then the
+ *          file will be created in the root of the securityfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ *        on.  The inode.i_private pointer will point to this value on
+ *        the open() call.
+ * @fops: a pointer to a struct file_operations that should be used for
+ *        this file.
+ *
+ * This is the basic "create a file" function for securityfs.  It allows for a
+ * wide range of flexibility in creating a file, or a directory (if you
+ * want to create a directory, use the securityfs_create_dir() function
+ * instead).
+ *
+ * This function returns a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the securityfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here).  If an error occurs, the function will return
+ * the error value (via ERR_PTR).
+ *
+ * If securityfs is not enabled in the kernel, the value %-ENODEV is
+ * returned.
+ */
+struct dentry *securityfs_create_file(const char *name, umode_t mode,
+				   struct dentry *parent, void *data,
+				   const struct file_operations *fops)
+{
+	struct dentry *dentry;
+	int is_dir = S_ISDIR(mode);
+	struct inode *dir, *inode;
+	int error;
+
+	if (!is_dir) {
+		BUG_ON(!fops);
+		mode = (mode & S_IALLUGO) | S_IFREG;
+	}
+
+	pr_debug("securityfs: creating file '%s'\n",name);
+
+	error = simple_pin_fs(&fs_type, &mount, &mount_count);
+	if (error)
+		return ERR_PTR(error);
+
+	if (!parent)
+		parent = mount->mnt_root;
+
+	dir = d_inode(parent);
+
+	mutex_lock(&dir->i_mutex);
+	dentry = lookup_one_len(name, parent, strlen(name));
+	if (IS_ERR(dentry))
+		goto out;
+
+	if (d_really_is_positive(dentry)) {
+		error = -EEXIST;
+		goto out1;
+	}
+
+	inode = new_inode(dir->i_sb);
+	if (!inode) {
+		error = -ENOMEM;
+		goto out1;
+	}
+
+	inode->i_ino = get_next_ino();
+	inode->i_mode = mode;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_private = data;
+	if (is_dir) {
+		inode->i_op = &simple_dir_inode_operations;
+		inode->i_fop = &simple_dir_operations;
+		inc_nlink(inode);
+		inc_nlink(dir);
+	} else {
+		inode->i_fop = fops;
+	}
+	d_instantiate(dentry, inode);
+	dget(dentry);
+	mutex_unlock(&dir->i_mutex);
+	return dentry;
+
+out1:
+	dput(dentry);
+	dentry = ERR_PTR(error);
+out:
+	mutex_unlock(&dir->i_mutex);
+	simple_release_fs(&mount, &mount_count);
+	return dentry;
+}
+EXPORT_SYMBOL_GPL(securityfs_create_file);
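+
+/*
+ * Minimal usage sketch for a hypothetical caller (my_fops and my_read are
+ * assumed to be defined by that caller, not by this file):
+ *
+ *	static const struct file_operations my_fops = {
+ *		.read = my_read,
+ *	};
+ *
+ *	struct dentry *d = securityfs_create_file("my_file", 0444, NULL,
+ *						  NULL, &my_fops);
+ *	if (IS_ERR(d))
+ *		return PTR_ERR(d);
+ *	...
+ *	securityfs_remove(d);	// required on module unload
+ */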
+
+/**
+ * securityfs_create_dir - create a directory in the securityfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the directory to
+ *        create.
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is %NULL, then the
+ *          directory will be created in the root of the securityfs filesystem.
+ *
+ * This function creates a directory in securityfs with the given @name.
+ *
+ * This function returns a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the securityfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here).  If an error occurs, the function will return
+ * the error value (via ERR_PTR).
+ *
+ * If securityfs is not enabled in the kernel, the value %-ENODEV is
+ * returned.  It is not wise to check for this value, but rather to check
+ * with IS_ERR() instead, so as to eliminate the need for #ifdef in the
+ * calling code.
+ */
+struct dentry *securityfs_create_dir(const char *name, struct dentry *parent)
+{
+	return securityfs_create_file(name,
+				      S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
+				      parent, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(securityfs_create_dir);
+
+/**
+ * securityfs_remove - removes a file or directory from the securityfs filesystem
+ *
+ * @dentry: a pointer to a the dentry of the file or directory to be removed.
+ *
+ * This function removes a file or directory in securityfs that was previously
+ * created with a call to another securityfs function (like
+ * securityfs_create_file() or variants thereof.)
+ *
+ * This function is required to be called in order for the file to be
+ * removed. No automatic cleanup of files will happen when a module is
+ * removed; you are responsible here.
+ */
+void securityfs_remove(struct dentry *dentry)
+{
+	struct dentry *parent;
+
+	if (!dentry || IS_ERR(dentry))
+		return;
+
+	parent = dentry->d_parent;
+	if (!parent || d_really_is_negative(parent))
+		return;
+
+	mutex_lock(&d_inode(parent)->i_mutex);
+	if (simple_positive(dentry)) {
+		if (d_is_dir(dentry))
+			simple_rmdir(d_inode(parent), dentry);
+		else
+			simple_unlink(d_inode(parent), dentry);
+		dput(dentry);
+	}
+	mutex_unlock(&d_inode(parent)->i_mutex);
+	simple_release_fs(&mount, &mount_count);
+}
+EXPORT_SYMBOL_GPL(securityfs_remove);
+
+static int __init securityfs_init(void)
+{
+	int retval;
+
+	retval = sysfs_create_mount_point(kernel_kobj, "security");
+	if (retval)
+		return retval;
+
+	retval = register_filesystem(&fs_type);
+	if (retval)
+		sysfs_remove_mount_point(kernel_kobj, "security");
+	return retval;
+}
+
+core_initcall(securityfs_init);
+MODULE_LICENSE("GPL");
+
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
new file mode 100644
index 0000000..73c457b
--- /dev/null
+++ b/security/integrity/Kconfig
@@ -0,0 +1,62 @@
+#
+config INTEGRITY
+	bool "Integrity subsystem"
+	depends on SECURITY
+	default y
+	help
+	  This option enables the integrity subsystem, which is comprised
+	  of a number of different components including the Integrity
+	  Measurement Architecture (IMA), Extended Verification Module
+	  (EVM), IMA-appraisal extension, digital signature verification
+	  extension and audit measurement log support.
+
+	  Each of these components can be enabled/disabled separately.
+	  Refer to the individual components for additional details.
+
+if INTEGRITY
+
+config INTEGRITY_SIGNATURE
+	bool "Digital signature verification using multiple keyrings"
+	depends on KEYS
+	default n
+	select SIGNATURE
+	help
+	  This option enables digital signature verification support
+	  using multiple keyrings. It defines separate keyrings for each
+	  of the different use cases - evm, ima, and modules.
+	  Using different keyrings improves search performance and also
+	  allows certain keyrings to be "locked" to prevent new keys from
+	  being added.  This is useful for the evm and module keyrings,
+	  whose keys are usually only added from the initramfs.
+
+config INTEGRITY_ASYMMETRIC_KEYS
+	bool "Enable asymmetric keys support"
+	depends on INTEGRITY_SIGNATURE
+	default n
+	select ASYMMETRIC_KEY_TYPE
+	select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+	select PUBLIC_KEY_ALGO_RSA
+	select X509_CERTIFICATE_PARSER
+	help
+	  This option enables digital signature verification using
+	  asymmetric keys.
+
+config INTEGRITY_AUDIT
+	bool "Enable integrity auditing support"
+	depends on AUDIT
+	default y
+	help
+	  In addition to enabling integrity auditing support, this
+	  option adds a kernel parameter 'integrity_audit', which
+	  controls the level of integrity auditing messages.
+	  0 - basic integrity auditing messages (default)
+	  1 - additional integrity auditing messages
+
+	  Additional informational integrity auditing messages would
+	  be enabled by specifying 'integrity_audit=1' on the kernel
+	  command line.
+
+source security/integrity/ima/Kconfig
+source security/integrity/evm/Kconfig
+
+endif   # if INTEGRITY
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
new file mode 100644
index 0000000..8d1f4bf
--- /dev/null
+++ b/security/integrity/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for caching inode integrity data (iint)
+#
+
+obj-$(CONFIG_INTEGRITY) += integrity.o
+
+integrity-y := iint.o
+integrity-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o
+integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
+integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
+
+subdir-$(CONFIG_IMA)			+= ima
+obj-$(CONFIG_IMA)			+= ima/
+subdir-$(CONFIG_EVM)			+= evm
+obj-$(CONFIG_EVM)			+= evm/
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
new file mode 100644
index 0000000..5be9ffb
--- /dev/null
+++ b/security/integrity/digsig.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/key-type.h>
+#include <linux/digsig.h>
+
+#include "integrity.h"
+
+static struct key *keyring[INTEGRITY_KEYRING_MAX];
+
+static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
+	"_evm",
+	"_module",
+#ifndef CONFIG_IMA_TRUSTED_KEYRING
+	"_ima",
+#else
+	".ima",
+#endif
+};
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+			    const char *digest, int digestlen)
+{
+	if (id >= INTEGRITY_KEYRING_MAX)
+		return -EINVAL;
+
+	if (!keyring[id]) {
+		keyring[id] =
+			request_key(&key_type_keyring, keyring_name[id], NULL);
+		if (IS_ERR(keyring[id])) {
+			int err = PTR_ERR(keyring[id]);
+			pr_err("no %s keyring: %d\n", keyring_name[id], err);
+			keyring[id] = NULL;
+			return err;
+		}
+	}
+
+	switch (sig[1]) {
+	case 1:
+		/* the v1 API expects the signature without the xattr type */
+		return digsig_verify(keyring[id], sig + 1, siglen - 1,
+				     digest, digestlen);
+	case 2:
+		return asymmetric_verify(keyring[id], sig, siglen,
+					 digest, digestlen);
+	}
+
+	return -EOPNOTSUPP;
+}
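+
+/*
+ * Calling sketch (illustrative): verifying an IMA file signature against
+ * the IMA keyring, where sig/siglen hold the raw signature xattr payload
+ * and digest/digestlen the file hash, both supplied by the caller:
+ *
+ *	rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, sig, siglen,
+ *				     digest, digestlen);
+ */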
+
+int __init integrity_init_keyring(const unsigned int id)
+{
+	const struct cred *cred = current_cred();
+	int err = 0;
+
+	keyring[id] = keyring_alloc(keyring_name[id], KUIDT_INIT(0),
+				    KGIDT_INIT(0), cred,
+				    ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+				     KEY_USR_VIEW | KEY_USR_READ |
+				     KEY_USR_WRITE | KEY_USR_SEARCH),
+				    KEY_ALLOC_NOT_IN_QUOTA, NULL);
+	if (!IS_ERR(keyring[id]))
+		set_bit(KEY_FLAG_TRUSTED_ONLY, &keyring[id]->flags);
+	else {
+		err = PTR_ERR(keyring[id]);
+		pr_info("Can't allocate %s keyring (%d)\n",
+			keyring_name[id], err);
+		keyring[id] = NULL;
+	}
+	return err;
+}
+
+int __init integrity_load_x509(const unsigned int id, const char *path)
+{
+	key_ref_t key;
+	char *data;
+	int rc;
+
+	if (!keyring[id])
+		return -EINVAL;
+
+	rc = integrity_read_file(path, &data);
+	if (rc < 0)
+		return rc;
+
+	key = key_create_or_update(make_key_ref(keyring[id], 1),
+				   "asymmetric",
+				   NULL,
+				   data,
+				   rc,
+				   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+				    KEY_USR_VIEW | KEY_USR_READ),
+				   KEY_ALLOC_NOT_IN_QUOTA);
+	if (IS_ERR(key)) {
+		rc = PTR_ERR(key);
+		pr_err("Problem loading X.509 certificate (%d): %s\n",
+		       rc, path);
+	} else {
+		pr_notice("Loaded X.509 cert '%s': %s\n",
+			  key_ref_to_ptr(key)->description, path);
+		key_ref_put(key);
+	}
+	kfree(data);
+	return 0;
+}
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
new file mode 100644
index 0000000..4fec181
--- /dev/null
+++ b/security/integrity/digsig_asymmetric.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/key-type.h>
+#include <crypto/public_key.h>
+#include <keys/asymmetric-type.h>
+
+#include "integrity.h"
+
+/*
+ * Request an asymmetric key.
+ */
+static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
+{
+	struct key *key;
+	char name[12];
+
+	sprintf(name, "id:%08x", keyid);
+
+	pr_debug("key search: \"%s\"\n", name);
+
+	if (keyring) {
+		/* search in specific keyring */
+		key_ref_t kref;
+		kref = keyring_search(make_key_ref(keyring, 1),
+				      &key_type_asymmetric, name);
+		if (IS_ERR(kref))
+			key = ERR_CAST(kref);
+		else
+			key = key_ref_to_ptr(kref);
+	} else {
+		key = request_key(&key_type_asymmetric, name, NULL);
+	}
+
+	if (IS_ERR(key)) {
+		pr_err_ratelimited("Request for unknown key '%s' err %ld\n",
+				   name, PTR_ERR(key));
+		switch (PTR_ERR(key)) {
+			/* Hide some search errors */
+		case -EACCES:
+		case -ENOTDIR:
+		case -EAGAIN:
+			return ERR_PTR(-ENOKEY);
+		default:
+			return key;
+		}
+	}
+
+	pr_debug("%s() = 0 [%x]\n", __func__, key_serial(key));
+
+	return key;
+}
+
+int asymmetric_verify(struct key *keyring, const char *sig,
+		      int siglen, const char *data, int datalen)
+{
+	struct public_key_signature pks;
+	struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig;
+	struct key *key;
+	int ret = -ENOMEM;
+
+	if (siglen <= sizeof(*hdr))
+		return -EBADMSG;
+
+	siglen -= sizeof(*hdr);
+
+	if (siglen != __be16_to_cpu(hdr->sig_size))
+		return -EBADMSG;
+
+	if (hdr->hash_algo >= PKEY_HASH__LAST)
+		return -ENOPKG;
+
+	key = request_asymmetric_key(keyring, __be32_to_cpu(hdr->keyid));
+	if (IS_ERR(key))
+		return PTR_ERR(key);
+
+	memset(&pks, 0, sizeof(pks));
+
+	pks.pkey_hash_algo = hdr->hash_algo;
+	pks.digest = (u8 *)data;
+	pks.digest_size = datalen;
+	pks.nr_mpi = 1;
+	pks.rsa.s = mpi_read_raw_data(hdr->sig, siglen);
+
+	if (pks.rsa.s)
+		ret = verify_signature(key, &pks);
+
+	mpi_free(pks.rsa.s);
+	key_put(key);
+	pr_debug("%s() = %d\n", __func__, ret);
+	return ret;
+}
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
new file mode 100644
index 0000000..bf19723
--- /dev/null
+++ b/security/integrity/evm/Kconfig
@@ -0,0 +1,44 @@
+config EVM
+	bool "EVM support"
+	select KEYS
+	select ENCRYPTED_KEYS
+	select CRYPTO_HMAC
+	select CRYPTO_SHA1
+	default n
+	help
+	  EVM protects a file's security extended attributes against
+	  integrity attacks.
+
+	  If you are unsure how to answer this question, answer N.
+
+config EVM_ATTR_FSUUID
+	bool "FSUUID (version 2)"
+	default y
+	depends on EVM
+	help
+	  Include filesystem UUID for HMAC calculation.
+
+	  The default value is 'selected', which is the former version 2.
+	  If 'not selected', it is the former version 1.
+
+	  WARNING: changing the HMAC calculation method or adding
+	  additional info to the calculation, requires existing EVM
+	  labeled file systems to be relabeled.
+
+config EVM_EXTRA_SMACK_XATTRS
+	bool "Additional SMACK xattrs"
+	depends on EVM && SECURITY_SMACK
+	default n
+	help
+	  Include additional SMACK xattrs for HMAC calculation.
+
+	  In addition to the original security xattrs (eg. security.selinux,
+	  security.SMACK64, security.capability, and security.ima) included
+	  in the HMAC calculation, enabling this option includes newly defined
+	  Smack xattrs: security.SMACK64EXEC, security.SMACK64TRANSMUTE and
+	  security.SMACK64MMAP.
+
+	  WARNING: changing the HMAC calculation method or adding
+	  additional info to the calculation, requires existing EVM
+	  labeled file systems to be relabeled.
+
diff --git a/security/integrity/evm/Makefile b/security/integrity/evm/Makefile
new file mode 100644
index 0000000..7393c41
--- /dev/null
+++ b/security/integrity/evm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for building the Extended Verification Module(EVM)
+#
+obj-$(CONFIG_EVM) += evm.o
+
+evm-y := evm_main.o evm_crypto.o evm_secfs.o
+evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
new file mode 100644
index 0000000..88bfe77
--- /dev/null
+++ b/security/integrity/evm/evm.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm.h
+ *
+ */
+
+#ifndef __INTEGRITY_EVM_H
+#define __INTEGRITY_EVM_H
+
+#include <linux/xattr.h>
+#include <linux/security.h>
+
+#include "../integrity.h"
+
+extern int evm_initialized;
+extern char *evm_hmac;
+extern char *evm_hash;
+
+#define EVM_ATTR_FSUUID		0x0001
+
+extern int evm_hmac_attrs;
+
+extern struct crypto_shash *hmac_tfm;
+extern struct crypto_shash *hash_tfm;
+
+/* List of EVM protected security xattrs */
+extern char *evm_config_xattrnames[];
+
+int evm_init_key(void);
+int evm_update_evmxattr(struct dentry *dentry,
+			const char *req_xattr_name,
+			const char *req_xattr_value,
+			size_t req_xattr_value_len);
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+		  const char *req_xattr_value,
+		  size_t req_xattr_value_len, char *digest);
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+		  const char *req_xattr_value,
+		  size_t req_xattr_value_len, char *digest);
+int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+		  char *hmac_val);
+int evm_init_secfs(void);
+
+#endif
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
new file mode 100644
index 0000000..461f8d8
--- /dev/null
+++ b/security/integrity/evm/evm_crypto.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_crypto.c
+ *	 Using root's kernel master key (kmk), calculate the HMAC
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <keys/encrypted-type.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+#define EVMKEY "evm-key"
+#define MAX_KEY_SIZE 128
+static unsigned char evmkey[MAX_KEY_SIZE];
+static int evmkey_len = MAX_KEY_SIZE;
+
+struct crypto_shash *hmac_tfm;
+struct crypto_shash *hash_tfm;
+
+static DEFINE_MUTEX(mutex);
+
+static struct shash_desc *init_desc(char type)
+{
+	long rc;
+	char *algo;
+	struct crypto_shash **tfm;
+	struct shash_desc *desc;
+
+	if (type == EVM_XATTR_HMAC) {
+		tfm = &hmac_tfm;
+		algo = evm_hmac;
+	} else {
+		tfm = &hash_tfm;
+		algo = evm_hash;
+	}
+
+	if (*tfm == NULL) {
+		mutex_lock(&mutex);
+		if (*tfm)
+			goto out;
+		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(*tfm)) {
+			rc = PTR_ERR(*tfm);
+			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
+			*tfm = NULL;
+			mutex_unlock(&mutex);
+			return ERR_PTR(rc);
+		}
+		if (type == EVM_XATTR_HMAC) {
+			rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
+			if (rc) {
+				crypto_free_shash(*tfm);
+				*tfm = NULL;
+				mutex_unlock(&mutex);
+				return ERR_PTR(rc);
+			}
+		}
+out:
+		mutex_unlock(&mutex);
+	}
+
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
+			GFP_KERNEL);
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
+
+	desc->tfm = *tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	rc = crypto_shash_init(desc);
+	if (rc) {
+		kfree(desc);
+		return ERR_PTR(rc);
+	}
+	return desc;
+}
+
+/* Protect against 'cutting & pasting' the security.evm xattr between files
+ * by including inode-specific info.
+ *
+ * (Additional directory/file metadata needs to be added for more complete
+ * protection.)
+ */
+static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+			  char *digest)
+{
+	struct h_misc {
+		unsigned long ino;
+		__u32 generation;
+		uid_t uid;
+		gid_t gid;
+		umode_t mode;
+	} hmac_misc;
+
+	memset(&hmac_misc, 0, sizeof(hmac_misc));
+	hmac_misc.ino = inode->i_ino;
+	hmac_misc.generation = inode->i_generation;
+	hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
+	hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
+	hmac_misc.mode = inode->i_mode;
+	crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
+	if (evm_hmac_attrs & EVM_ATTR_FSUUID)
+		crypto_shash_update(desc, inode->i_sb->s_uuid,
+				    sizeof(inode->i_sb->s_uuid));
+	crypto_shash_final(desc, digest);
+}
+
+/*
+ * Calculate the HMAC value across the set of protected security xattrs.
+ *
+ * Instead of retrieving the requested xattr, for performance, calculate
+ * the hmac using the requested xattr value. Don't alloc/free memory for
+ * each xattr, but attempt to re-use the previously allocated memory.
+ */
+static int evm_calc_hmac_or_hash(struct dentry *dentry,
+				const char *req_xattr_name,
+				const char *req_xattr_value,
+				size_t req_xattr_value_len,
+				char type, char *digest)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	struct shash_desc *desc;
+	char **xattrname;
+	size_t xattr_size = 0;
+	char *xattr_value = NULL;
+	int error;
+	int size;
+
+	if (!inode->i_op->getxattr)
+		return -EOPNOTSUPP;
+	desc = init_desc(type);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	error = -ENODATA;
+	for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+		if ((req_xattr_name && req_xattr_value)
+		    && !strcmp(*xattrname, req_xattr_name)) {
+			error = 0;
+			crypto_shash_update(desc, (const u8 *)req_xattr_value,
+					     req_xattr_value_len);
+			continue;
+		}
+		size = vfs_getxattr_alloc(dentry, *xattrname,
+					  &xattr_value, xattr_size, GFP_NOFS);
+		if (size == -ENOMEM) {
+			error = -ENOMEM;
+			goto out;
+		}
+		if (size < 0)
+			continue;
+
+		error = 0;
+		xattr_size = size;
+		crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+	}
+	hmac_add_misc(desc, inode, digest);
+
+out:
+	kfree(xattr_value);
+	kfree(desc);
+	return error;
+}
+
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+		  const char *req_xattr_value, size_t req_xattr_value_len,
+		  char *digest)
+{
+	return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+				req_xattr_value_len, EVM_XATTR_HMAC, digest);
+}
+
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+		  const char *req_xattr_value, size_t req_xattr_value_len,
+		  char *digest)
+{
+	return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+				req_xattr_value_len, IMA_XATTR_DIGEST, digest);
+}
+
+/*
+ * Calculate the hmac and update security.evm xattr
+ *
+ * Expects to be called with i_mutex locked.
+ */
+int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+			const char *xattr_value, size_t xattr_value_len)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	struct evm_ima_xattr_data xattr_data;
+	int rc = 0;
+
+	rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+			   xattr_value_len, xattr_data.digest);
+	if (rc == 0) {
+		xattr_data.type = EVM_XATTR_HMAC;
+		rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+					   &xattr_data,
+					   sizeof(xattr_data), 0);
+	} else if (rc == -ENODATA && inode->i_op->removexattr) {
+		rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+	}
+	return rc;
+}
+
+int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
+		  char *hmac_val)
+{
+	struct shash_desc *desc;
+
+	desc = init_desc(EVM_XATTR_HMAC);
+	if (IS_ERR(desc)) {
+		pr_info("init_desc failed\n");
+		return PTR_ERR(desc);
+	}
+
+	crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
+	hmac_add_misc(desc, inode, hmac_val);
+	kfree(desc);
+	return 0;
+}
+
+/*
+ * Get the key from the TPM for the SHA1-HMAC
+ */
+int evm_init_key(void)
+{
+	struct key *evm_key;
+	struct encrypted_key_payload *ekp;
+	int rc = 0;
+
+	evm_key = request_key(&key_type_encrypted, EVMKEY, NULL);
+	if (IS_ERR(evm_key))
+		return -ENOENT;
+
+	down_read(&evm_key->sem);
+	ekp = evm_key->payload.data[0];
+	if (ekp->decrypted_datalen > MAX_KEY_SIZE) {
+		rc = -EINVAL;
+		goto out;
+	}
+	memcpy(evmkey, ekp->decrypted_data, ekp->decrypted_datalen);
+out:
+	/* burn the original key contents */
+	memset(ekp->decrypted_data, 0, ekp->decrypted_datalen);
+	up_read(&evm_key->sem);
+	key_put(evm_key);
+	return rc;
+}
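+
+/*
+ * Usage sketch (illustrative, following the EVM documentation; the
+ * exact keyctl arguments depend on the local setup): the "evm-key"
+ * encrypted key is loaded from user space before enabling EVM, e.g.:
+ *
+ *	keyctl add encrypted evm-key "new trusted:kmk 32" @u
+ *
+ * where "kmk" names a previously loaded trusted (TPM) master key.
+ */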
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
new file mode 100644
index 0000000..3d145a3
--- /dev/null
+++ b/security/integrity/evm/evm_main.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_main.c
+ *	implements evm_inode_setxattr, evm_inode_post_setxattr,
+ *	evm_inode_removexattr, and evm_verifyxattr
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/audit.h>
+#include <linux/xattr.h>
+#include <linux/integrity.h>
+#include <linux/evm.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include "evm.h"
+
+int evm_initialized;
+
+static char *integrity_status_msg[] = {
+	"pass", "fail", "no_label", "no_xattrs", "unknown"
+};
+char *evm_hmac = "hmac(sha1)";
+char *evm_hash = "sha1";
+int evm_hmac_attrs;
+
+char *evm_config_xattrnames[] = {
+#ifdef CONFIG_SECURITY_SELINUX
+	XATTR_NAME_SELINUX,
+#endif
+#ifdef CONFIG_SECURITY_SMACK
+	XATTR_NAME_SMACK,
+#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
+	XATTR_NAME_SMACKEXEC,
+	XATTR_NAME_SMACKTRANSMUTE,
+	XATTR_NAME_SMACKMMAP,
+#endif
+#endif
+#ifdef CONFIG_IMA_APPRAISE
+	XATTR_NAME_IMA,
+#endif
+	XATTR_NAME_CAPS,
+	NULL
+};
+
+static int evm_fixmode;
+static int __init evm_set_fixmode(char *str)
+{
+	if (strncmp(str, "fix", 3) == 0)
+		evm_fixmode = 1;
+	return 0;
+}
+__setup("evm=", evm_set_fixmode);
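+
+/*
+ * For example, booting with "evm=fix" on the kernel command line puts
+ * EVM into fix mode, in which verification is skipped so that invalid
+ * 'security.evm' values can be corrected on metadata updates.
+ */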
+
+static void __init evm_init_config(void)
+{
+#ifdef CONFIG_EVM_ATTR_FSUUID
+	evm_hmac_attrs |= EVM_ATTR_FSUUID;
+#endif
+	pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs);
+}
+
+static int evm_find_protected_xattrs(struct dentry *dentry)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	char **xattr;
+	int error;
+	int count = 0;
+
+	if (!inode->i_op->getxattr)
+		return -EOPNOTSUPP;
+
+	for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
+		error = inode->i_op->getxattr(dentry, *xattr, NULL, 0);
+		if (error < 0) {
+			if (error == -ENODATA)
+				continue;
+			return error;
+		}
+		count++;
+	}
+
+	return count;
+}
+
+/*
+ * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr
+ *
+ * Compute the HMAC on the dentry's protected set of extended attributes
+ * and compare it against the stored security.evm xattr.
+ *
+ * For performance:
+ * - use the previously retrieved xattr value and length to calculate the
+ *   HMAC.
+ * - cache the verification result in the iint, when available.
+ *
+ * Returns integrity status
+ */
+static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+					     const char *xattr_name,
+					     char *xattr_value,
+					     size_t xattr_value_len,
+					     struct integrity_iint_cache *iint)
+{
+	struct evm_ima_xattr_data *xattr_data = NULL;
+	struct evm_ima_xattr_data calc;
+	enum integrity_status evm_status = INTEGRITY_PASS;
+	int rc, xattr_len;
+
+	if (iint && iint->evm_status == INTEGRITY_PASS)
+		return iint->evm_status;
+
+	/* if the status is not PASS, re-verify, guarding against a prior -ENOMEM */
+
+	/* first need to know the sig type */
+	rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
+				GFP_NOFS);
+	if (rc <= 0) {
+		evm_status = INTEGRITY_FAIL;
+		if (rc == -ENODATA) {
+			rc = evm_find_protected_xattrs(dentry);
+			if (rc > 0)
+				evm_status = INTEGRITY_NOLABEL;
+			else if (rc == 0)
+				evm_status = INTEGRITY_NOXATTRS; /* new file */
+		} else if (rc == -EOPNOTSUPP) {
+			evm_status = INTEGRITY_UNKNOWN;
+		}
+		goto out;
+	}
+
+	xattr_len = rc;
+
+	/* check value type */
+	switch (xattr_data->type) {
+	case EVM_XATTR_HMAC:
+		rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+				   xattr_value_len, calc.digest);
+		if (rc)
+			break;
+		rc = crypto_memneq(xattr_data->digest, calc.digest,
+			    sizeof(calc.digest));
+		if (rc)
+			rc = -EINVAL;
+		break;
+	case EVM_IMA_XATTR_DIGSIG:
+		rc = evm_calc_hash(dentry, xattr_name, xattr_value,
+				xattr_value_len, calc.digest);
+		if (rc)
+			break;
+		rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
+					(const char *)xattr_data, xattr_len,
+					calc.digest, sizeof(calc.digest));
+		if (!rc) {
+			/* Replace RSA with HMAC if not mounted readonly and
+			 * not immutable
+			 */
+			if (!IS_RDONLY(d_backing_inode(dentry)) &&
+			    !IS_IMMUTABLE(d_backing_inode(dentry)))
+				evm_update_evmxattr(dentry, xattr_name,
+						    xattr_value,
+						    xattr_value_len);
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc)
+		evm_status = (rc == -ENODATA) ?
+				INTEGRITY_NOXATTRS : INTEGRITY_FAIL;
+out:
+	if (iint)
+		iint->evm_status = evm_status;
+	kfree(xattr_data);
+	return evm_status;
+}
+
+static int evm_protected_xattr(const char *req_xattr_name)
+{
+	char **xattrname;
+	int namelen;
+	int found = 0;
+
+	namelen = strlen(req_xattr_name);
+	for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+		if ((strlen(*xattrname) == namelen)
+		    && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) {
+			found = 1;
+			break;
+		}
+		if (strncmp(req_xattr_name,
+			    *xattrname + XATTR_SECURITY_PREFIX_LEN,
+			    strlen(req_xattr_name)) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	return found;
+}
+
+/**
+ * evm_verifyxattr - verify the integrity of the requested xattr
+ * @dentry: object of the verify xattr
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Calculate the HMAC for the given dentry and verify it against the stored
+ * security.evm xattr. For performance, use the xattr value and length
+ * previously retrieved to calculate the HMAC.
+ *
+ * Returns the xattr integrity status.
+ *
+ * This function requires the caller to lock the inode's i_mutex before it
+ * is executed.
+ */
+enum integrity_status evm_verifyxattr(struct dentry *dentry,
+				      const char *xattr_name,
+				      void *xattr_value, size_t xattr_value_len,
+				      struct integrity_iint_cache *iint)
+{
+	if (!evm_initialized || !evm_protected_xattr(xattr_name))
+		return INTEGRITY_UNKNOWN;
+
+	if (!iint) {
+		iint = integrity_iint_find(d_backing_inode(dentry));
+		if (!iint)
+			return INTEGRITY_UNKNOWN;
+	}
+	return evm_verify_hmac(dentry, xattr_name, xattr_value,
+				 xattr_value_len, iint);
+}
+EXPORT_SYMBOL_GPL(evm_verifyxattr);
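+
+/*
+ * A minimal caller sketch, mirroring how ima_appraise_measurement()
+ * uses this interface: with i_mutex held and the xattr value already
+ * read (e.g. via vfs_getxattr_alloc()):
+ *
+ *	status = evm_verifyxattr(dentry, XATTR_NAME_IMA,
+ *				 xattr_value, xattr_len, iint);
+ *	if (status != INTEGRITY_PASS)
+ *		...treat the protected xattrs as untrusted...
+ */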
+
+/*
+ * evm_verify_current_integrity - verify the dentry's metadata integrity
+ * @dentry: pointer to the affected dentry
+ *
+ * Verify and return the dentry's metadata integrity.  Verification is
+ * skipped before EVM is initialized, for non-regular files, and in
+ * 'fix' mode.
+ */
+static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
+{
+	struct inode *inode = d_backing_inode(dentry);
+
+	if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode)
+		return 0;
+	return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
+}
+
+/*
+ * evm_protect_xattr - protect the EVM extended attribute
+ *
+ * Prevent security.evm from being modified or removed without the
+ * necessary permissions or when the existing value is invalid.
+ *
+ * The posix xattr acls are 'system' prefixed, which normally would not
+ * affect security.evm.  An interesting side effect of writing posix xattr
+ * acls is that they modify the i_mode, which is included in security.evm.
+ * For posix xattr acls only, permit security.evm, even if it currently
+ * doesn't exist, to be updated.
+ */
+static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+			     const void *xattr_value, size_t xattr_value_len)
+{
+	enum integrity_status evm_status;
+
+	if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+	} else if (!evm_protected_xattr(xattr_name)) {
+		if (!posix_xattr_acl(xattr_name))
+			return 0;
+		evm_status = evm_verify_current_integrity(dentry);
+		if ((evm_status == INTEGRITY_PASS) ||
+		    (evm_status == INTEGRITY_NOXATTRS))
+			return 0;
+		goto out;
+	}
+	evm_status = evm_verify_current_integrity(dentry);
+	if (evm_status == INTEGRITY_NOXATTRS) {
+		struct integrity_iint_cache *iint;
+
+		iint = integrity_iint_find(d_backing_inode(dentry));
+		if (iint && (iint->flags & IMA_NEW_FILE))
+			return 0;
+
+		/* exception for pseudo filesystems */
+		if (dentry->d_inode->i_sb->s_magic == TMPFS_MAGIC
+		    || dentry->d_inode->i_sb->s_magic == SYSFS_MAGIC)
+			return 0;
+
+		integrity_audit_msg(AUDIT_INTEGRITY_METADATA,
+				    dentry->d_inode, dentry->d_name.name,
+				    "update_metadata",
+				    integrity_status_msg[evm_status],
+				    -EPERM, 0);
+	}
+out:
+	if (evm_status != INTEGRITY_PASS)
+		integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
+				    dentry->d_name.name, "appraise_metadata",
+				    integrity_status_msg[evm_status],
+				    -EPERM, 0);
+	return evm_status == INTEGRITY_PASS ? 0 : -EPERM;
+}
+
+/**
+ * evm_inode_setxattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: the new extended attribute value length
+ *
+ * Before allowing the 'security.evm' protected xattr to be updated,
+ * verify the existing value is valid.  As only the kernel should have
+ * access to the EVM encrypted key needed to calculate the HMAC, prevent
+ * userspace from writing an HMAC value.  Writing 'security.evm'
+ * requires CAP_SYS_ADMIN privileges.
+ */
+int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+		       const void *xattr_value, size_t xattr_value_len)
+{
+	const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+	if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+		if (!xattr_value_len)
+			return -EINVAL;
+		if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
+			return -EPERM;
+	}
+	return evm_protect_xattr(dentry, xattr_name, xattr_value,
+				 xattr_value_len);
+}
+
+/**
+ * evm_inode_removexattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+	return evm_protect_xattr(dentry, xattr_name, NULL, 0);
+}
+
+/**
+ * evm_inode_post_setxattr - update 'security.evm' to reflect the changes
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: the new extended attribute value length
+ *
+ * Update the HMAC stored in 'security.evm' to reflect the change.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * __vfs_setxattr_noperm().  The caller of which has taken the inode's
+ * i_mutex lock.
+ */
+void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
+			     const void *xattr_value, size_t xattr_value_len)
+{
+	if (!evm_initialized || (!evm_protected_xattr(xattr_name)
+				 && !posix_xattr_acl(xattr_name)))
+		return;
+
+	evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
+}
+
+/**
+ * evm_inode_post_removexattr - update 'security.evm' after removing the xattr
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Update the HMAC stored in 'security.evm' to reflect removal of the xattr.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * vfs_removexattr() which takes the i_mutex.
+ */
+void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+	if (!evm_initialized || !evm_protected_xattr(xattr_name))
+		return;
+
+	evm_update_evmxattr(dentry, xattr_name, NULL, 0);
+}
+
+/**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ */
+int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	unsigned int ia_valid = attr->ia_valid;
+	enum integrity_status evm_status;
+
+	if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+		return 0;
+	evm_status = evm_verify_current_integrity(dentry);
+	if ((evm_status == INTEGRITY_PASS) ||
+	    (evm_status == INTEGRITY_NOXATTRS))
+		return 0;
+	integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
+			    dentry->d_name.name, "appraise_metadata",
+			    integrity_status_msg[evm_status], -EPERM, 0);
+	return -EPERM;
+}
+
+/**
+ * evm_inode_post_setattr - update 'security.evm' after modifying metadata
+ * @dentry: pointer to the affected dentry
+ * @ia_valid: for the UID and GID status
+ *
+ * For now, update the HMAC stored in 'security.evm' to reflect UID/GID
+ * changes.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+	if (!evm_initialized)
+		return;
+
+	if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+		evm_update_evmxattr(dentry, NULL, NULL, 0);
+}
+
+/*
+ * evm_inode_init_security - initializes security.evm
+ */
+int evm_inode_init_security(struct inode *inode,
+				 const struct xattr *lsm_xattr,
+				 struct xattr *evm_xattr)
+{
+	struct evm_ima_xattr_data *xattr_data;
+	int rc;
+
+	if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name))
+		return 0;
+
+	xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
+	if (!xattr_data)
+		return -ENOMEM;
+
+	xattr_data->type = EVM_XATTR_HMAC;
+	rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
+	if (rc < 0)
+		goto out;
+
+	evm_xattr->value = xattr_data;
+	evm_xattr->value_len = sizeof(*xattr_data);
+	evm_xattr->name = XATTR_EVM_SUFFIX;
+	return 0;
+out:
+	kfree(xattr_data);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(evm_inode_init_security);
+
+static int __init init_evm(void)
+{
+	int error;
+
+	evm_init_config();
+
+	error = evm_init_secfs();
+	if (error < 0) {
+		pr_info("Error registering secfs\n");
+		goto err;
+	}
+
+	return 0;
+err:
+	return error;
+}
+
+/*
+ * evm_display_config - list the EVM protected security extended attributes
+ */
+static int __init evm_display_config(void)
+{
+	char **xattrname;
+
+	for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++)
+		pr_info("%s\n", *xattrname);
+	return 0;
+}
+
+pure_initcall(evm_display_config);
+late_initcall(init_evm);
+
+MODULE_DESCRIPTION("Extended Verification Module");
+MODULE_LICENSE("GPL");
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
new file mode 100644
index 0000000..46408b9
--- /dev/null
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+
+int posix_xattr_acl(const char *xattr)
+{
+	int xattr_len = strlen(xattr);
+
+	if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
+	     && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
+		return 1;
+	if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
+	     && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
+		return 1;
+	return 0;
+}
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
new file mode 100644
index 0000000..cf12a04
--- /dev/null
+++ b/security/integrity/evm/evm_secfs.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_secfs.c
+ *	- Used to signal when key is on keyring
+ *	- Get the key and enable EVM
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include "evm.h"
+
+static struct dentry *evm_init_tpm;
+
+/**
+ * evm_read_key - read() for <securityfs>/evm
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_key(struct file *filp, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	char temp[80];
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	sprintf(temp, "%d", evm_initialized);
+	rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+	return rc;
+}
+
+/**
+ * evm_write_key - write() for <securityfs>/evm
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Used to signal that key is on the kernel key ring.
+ * - get the integrity hmac key from the kernel key ring
+ * - create list of hmac protected extended attributes
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_key(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	char temp[80];
+	int i, error;
+
+	if (!capable(CAP_SYS_ADMIN) || evm_initialized)
+		return -EPERM;
+
+	if (count >= sizeof(temp) || count == 0)
+		return -EINVAL;
+
+	if (copy_from_user(temp, buf, count) != 0)
+		return -EFAULT;
+
+	temp[count] = '\0';
+
+	if ((sscanf(temp, "%d", &i) != 1) || (i != 1))
+		return -EINVAL;
+
+	error = evm_init_key();
+	if (!error) {
+		evm_initialized = 1;
+		pr_info("initialized\n");
+	} else
+		pr_err("initialization failed\n");
+	return count;
+}
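+
+/*
+ * Illustrative user-space trigger, assuming securityfs is mounted at
+ * /sys/kernel/security:
+ *
+ *	echo 1 > /sys/kernel/security/evm
+ *
+ * This causes evm_init_key() to fetch the "evm-key" from the keyring
+ * and, on success, sets evm_initialized.
+ */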
+
+static const struct file_operations evm_key_ops = {
+	.read		= evm_read_key,
+	.write		= evm_write_key,
+};
+
+int __init evm_init_secfs(void)
+{
+	int error = 0;
+
+	evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP,
+					      NULL, NULL, &evm_key_ops);
+	if (!evm_init_tpm || IS_ERR(evm_init_tpm))
+		error = -EFAULT;
+	return error;
+}
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
new file mode 100644
index 0000000..3d2f5b4
--- /dev/null
+++ b/security/integrity/iint.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: integrity_iint.c
+ *	- implements the integrity hooks: integrity_inode_alloc,
+ *	  integrity_inode_free
+ *	- cache integrity information associated with an inode
+ *	  using an rbtree.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include "integrity.h"
+
+static struct rb_root integrity_iint_tree = RB_ROOT;
+static DEFINE_RWLOCK(integrity_iint_lock);
+static struct kmem_cache *iint_cache __read_mostly;
+
+/*
+ * __integrity_iint_find - return the iint associated with an inode
+ */
+static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
+{
+	struct integrity_iint_cache *iint;
+	struct rb_node *n = integrity_iint_tree.rb_node;
+
+	while (n) {
+		iint = rb_entry(n, struct integrity_iint_cache, rb_node);
+
+		if (inode < iint->inode)
+			n = n->rb_left;
+		else if (inode > iint->inode)
+			n = n->rb_right;
+		else
+			break;
+	}
+	if (!n)
+		return NULL;
+
+	return iint;
+}
+
+/*
+ * integrity_iint_find - return the iint associated with an inode
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+{
+	struct integrity_iint_cache *iint;
+
+	if (!IS_IMA(inode))
+		return NULL;
+
+	read_lock(&integrity_iint_lock);
+	iint = __integrity_iint_find(inode);
+	read_unlock(&integrity_iint_lock);
+
+	return iint;
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+	kfree(iint->ima_hash);
+	iint->ima_hash = NULL;
+	iint->version = 0;
+	iint->flags = 0UL;
+	iint->ima_file_status = INTEGRITY_UNKNOWN;
+	iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+	iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+	iint->ima_module_status = INTEGRITY_UNKNOWN;
+	iint->evm_status = INTEGRITY_UNKNOWN;
+	kmem_cache_free(iint_cache, iint);
+}
+
+/**
+ * integrity_inode_get - find or allocate an iint associated with an inode
+ * @inode: pointer to the inode
+ * @return: allocated iint
+ *
+ * Caller must lock i_mutex
+ */
+struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+{
+	struct rb_node **p;
+	struct rb_node *node, *parent = NULL;
+	struct integrity_iint_cache *iint, *test_iint;
+
+	iint = integrity_iint_find(inode);
+	if (iint)
+		return iint;
+
+	iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+	if (!iint)
+		return NULL;
+
+	write_lock(&integrity_iint_lock);
+
+	p = &integrity_iint_tree.rb_node;
+	while (*p) {
+		parent = *p;
+		test_iint = rb_entry(parent, struct integrity_iint_cache,
+				     rb_node);
+		if (inode < test_iint->inode)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	iint->inode = inode;
+	node = &iint->rb_node;
+	inode->i_flags |= S_IMA;
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, &integrity_iint_tree);
+
+	write_unlock(&integrity_iint_lock);
+	return iint;
+}
+
+/**
+ * integrity_inode_free - called on security_inode_free
+ * @inode: pointer to the inode
+ *
+ * Free the integrity information (iint) associated with an inode.
+ */
+void integrity_inode_free(struct inode *inode)
+{
+	struct integrity_iint_cache *iint;
+
+	if (!IS_IMA(inode))
+		return;
+
+	write_lock(&integrity_iint_lock);
+	iint = __integrity_iint_find(inode);
+	rb_erase(&iint->rb_node, &integrity_iint_tree);
+	write_unlock(&integrity_iint_lock);
+
+	iint_free(iint);
+}
+
+static void init_once(void *foo)
+{
+	struct integrity_iint_cache *iint = foo;
+
+	memset(iint, 0, sizeof(*iint));
+	iint->version = 0;
+	iint->flags = 0UL;
+	iint->ima_file_status = INTEGRITY_UNKNOWN;
+	iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+	iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+	iint->ima_module_status = INTEGRITY_UNKNOWN;
+	iint->evm_status = INTEGRITY_UNKNOWN;
+}
+
+static int __init integrity_iintcache_init(void)
+{
+	iint_cache =
+	    kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+			      0, SLAB_PANIC, init_once);
+	return 0;
+}
+security_initcall(integrity_iintcache_init);
+
+
+/*
+ * integrity_kernel_read - read data from the file
+ *
+ * This is a function for reading file content instead of kernel_read().
+ * It does not perform locking checks, ensuring it cannot be blocked.
+ * It does not perform security checks, as these are irrelevant for IMA.
+ *
+ */
+int integrity_kernel_read(struct file *file, loff_t offset,
+			  char *addr, unsigned long count)
+{
+	mm_segment_t old_fs;
+	char __user *buf = (char __user *)addr;
+	ssize_t ret;
+
+	if (!(file->f_mode & FMODE_READ))
+		return -EBADF;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = __vfs_read(file, buf, count, &offset);
+	set_fs(old_fs);
+
+	return ret;
+}
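+
+/*
+ * Example (illustrative; see integrity_read_file() below for a real
+ * caller): read @count bytes starting at @offset of an opened file:
+ *
+ *	rc = integrity_kernel_read(file, 0, buf, size);
+ *	if (rc < 0)
+ *		...handle the error...
+ */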
+
+/*
+ * integrity_read_file - read entire file content into the buffer
+ *
+ * This function opens a file, allocates a buffer of the required
+ * size, reads the entire file content into the buffer, and closes the file.
+ *
+ * It is used only by init code.
+ *
+ */
+int __init integrity_read_file(const char *path, char **data)
+{
+	struct file *file;
+	loff_t size;
+	char *buf;
+	int rc = -EINVAL;
+
+	if (!path || !*path)
+		return -EINVAL;
+
+	file = filp_open(path, O_RDONLY, 0);
+	if (IS_ERR(file)) {
+		rc = PTR_ERR(file);
+		pr_err("Unable to open file: %s (%d)", path, rc);
+		return rc;
+	}
+
+	size = i_size_read(file_inode(file));
+	if (size <= 0)
+		goto out;
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = integrity_kernel_read(file, 0, buf, size);
+	if (rc < 0)
+		kfree(buf);
+	else if (rc != size)
+		rc = -EIO;
+	else
+		*data = buf;
+out:
+	fput(file);
+	return rc;
+}
+
+/*
+ * integrity_load_keys - load integrity keys hook
+ *
+ * Hook is called from init/main.c:kernel_init_freeable()
+ * when rootfs is ready
+ */
+void __init integrity_load_keys(void)
+{
+	ima_load_x509();
+}
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
new file mode 100644
index 0000000..df30334
--- /dev/null
+++ b/security/integrity/ima/Kconfig
@@ -0,0 +1,157 @@
+# IBM Integrity Measurement Architecture
+#
+config IMA
+	bool "Integrity Measurement Architecture(IMA)"
+	select SECURITYFS
+	select CRYPTO
+	select CRYPTO_HMAC
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_HASH_INFO
+	select TCG_TPM if HAS_IOMEM && !UML
+	select TCG_TIS if TCG_TPM && X86
+	select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+	help
+	  The Trusted Computing Group(TCG) runtime Integrity
+	  Measurement Architecture(IMA) maintains a list of hash
+	  values of executables and other sensitive system files,
+	  as they are read or executed. If an attacker manages
+	  to change the contents of an important system file
+	  being measured, we can tell.
+
+	  If your system has a TPM chip, then IMA also maintains
+	  an aggregate integrity value over this list inside the
+	  TPM hardware, so that the TPM can prove to a third party
+	  whether or not critical system files have been modified.
+	  Read <http://www.usenix.org/events/sec04/tech/sailer.html>
+	  to learn more about IMA.
+	  If unsure, say N.
+
+config IMA_MEASURE_PCR_IDX
+	int
+	depends on IMA
+	range 8 14
+	default 10
+	help
+	  IMA_MEASURE_PCR_IDX determines the TPM PCR register index
+	  that IMA uses to maintain the integrity aggregate of the
+	  measurement list.  If unsure, use the default 10.
+
+config IMA_LSM_RULES
+	bool
+	depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
+	default y
+	help
+	  Disabling this option will disregard LSM based policy rules.
+
+choice
+	prompt "Default template"
+	default IMA_NG_TEMPLATE
+	depends on IMA
+	help
+	  Select the default IMA measurement template.
+
+	  The original 'ima' measurement list template contains a
+	  hash, defined as 20 bytes, and a null terminated pathname,
+	  limited to 255 characters.  The 'ima-ng' measurement list
+	  template permits both larger hash digests and longer
+	  pathnames.
+
+	config IMA_TEMPLATE
+		bool "ima"
+	config IMA_NG_TEMPLATE
+		bool "ima-ng (default)"
+	config IMA_SIG_TEMPLATE
+		bool "ima-sig"
+endchoice
+
+config IMA_DEFAULT_TEMPLATE
+	string
+	depends on IMA
+	default "ima" if IMA_TEMPLATE
+	default "ima-ng" if IMA_NG_TEMPLATE
+	default "ima-sig" if IMA_SIG_TEMPLATE
+
+choice
+	prompt "Default integrity hash algorithm"
+	default IMA_DEFAULT_HASH_SHA1
+	depends on IMA
+	help
+	   Select the default hash algorithm used for the measurement
+	   list, integrity appraisal and audit log.  The compiled default
+	   hash algorithm can be overwritten using the kernel command
+	   line 'ima_hash=' option.
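+	   For example: "ima_hash=sha256".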
+
+	config IMA_DEFAULT_HASH_SHA1
+		bool "SHA1 (default)"
+		depends on CRYPTO_SHA1
+
+	config IMA_DEFAULT_HASH_SHA256
+		bool "SHA256"
+		depends on CRYPTO_SHA256 && !IMA_TEMPLATE
+
+	config IMA_DEFAULT_HASH_SHA512
+		bool "SHA512"
+		depends on CRYPTO_SHA512 && !IMA_TEMPLATE
+
+	config IMA_DEFAULT_HASH_WP512
+		bool "WP512"
+		depends on CRYPTO_WP512 && !IMA_TEMPLATE
+endchoice
+
+config IMA_DEFAULT_HASH
+	string
+	depends on IMA
+	default "sha1" if IMA_DEFAULT_HASH_SHA1
+	default "sha256" if IMA_DEFAULT_HASH_SHA256
+	default "sha512" if IMA_DEFAULT_HASH_SHA512
+	default "wp512" if IMA_DEFAULT_HASH_WP512
+
+config IMA_APPRAISE
+	bool "Appraise integrity measurements"
+	depends on IMA
+	default n
+	help
+	  This option enables local measurement integrity appraisal.
+	  It requires the system to be labeled with a security extended
+	  attribute containing the file hash measurement.  To protect
+	  the security extended attributes from offline attack, enable
+	  and configure EVM.
+
+	  For more information on integrity appraisal refer to:
+	  <http://linux-ima.sourceforge.net>
+	  If unsure, say N.
+
+config IMA_TRUSTED_KEYRING
+	bool "Require all keys on the .ima keyring be signed"
+	depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
+	depends on INTEGRITY_ASYMMETRIC_KEYS
+	default y
+	help
+	   This option requires that all keys added to the .ima
+	   keyring be signed by a key on the system trusted keyring.
+
+config IMA_LOAD_X509
+	bool "Load X509 certificate onto the '.ima' trusted keyring"
+	depends on IMA_TRUSTED_KEYRING
+	default n
+	help
+	   File signature verification is based on the public keys
+	   loaded on the .ima trusted keyring. These public keys are
+	   X509 certificates signed by a trusted key on the
+	   .system keyring.  This option enables X509 certificate
+	   loading from the kernel onto the '.ima' trusted keyring.
+
+config IMA_X509_PATH
+	string "IMA X509 certificate path"
+	depends on IMA_LOAD_X509
+	default "/etc/keys/x509_ima.der"
+	help
+	   This option defines IMA X509 certificate path.
+
+config IMA_APPRAISE_SIGNED_INIT
+	bool "Require signed user-space initialization"
+	depends on IMA_LOAD_X509
+	default n
+	help
+	   This option requires user-space init to be signed.
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
new file mode 100644
index 0000000..d79263d
--- /dev/null
+++ b/security/integrity/ima/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for building Trusted Computing Group's(TCG) runtime Integrity
+# Measurement Architecture(IMA).
+#
+
+obj-$(CONFIG_IMA) += ima.o
+
+ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
+	 ima_policy.o ima_template.o ima_template_lib.o
+ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
new file mode 100644
index 0000000..e2a60c3
--- /dev/null
+++ b/security/integrity/ima/ima.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima.h
+ *	internal Integrity Measurement Architecture (IMA) definitions
+ */
+
+#ifndef __LINUX_IMA_H
+#define __LINUX_IMA_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/security.h>
+#include <linux/hash.h>
+#include <linux/tpm.h>
+#include <linux/audit.h>
+
+#include "../integrity.h"
+
+enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
+		     IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+
+/* digest size for IMA, fits SHA1 or MD5 */
+#define IMA_DIGEST_SIZE		SHA1_DIGEST_SIZE
+#define IMA_EVENT_NAME_LEN_MAX	255
+
+#define IMA_HASH_BITS 9
+#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+
+#define IMA_TEMPLATE_FIELD_ID_MAX_LEN	16
+#define IMA_TEMPLATE_NUM_FIELDS_MAX	15
+
+#define IMA_TEMPLATE_IMA_NAME "ima"
+#define IMA_TEMPLATE_IMA_FMT "d|n"
+
+/* current content of the policy */
+extern int ima_policy_flag;
+
+/* set during initialization */
+extern int ima_initialized;
+extern int ima_used_chip;
+extern int ima_hash_algo;
+extern int ima_appraise;
+
+/* IMA event related data */
+struct ima_event_data {
+	struct integrity_iint_cache *iint;
+	struct file *file;
+	const unsigned char *filename;
+	struct evm_ima_xattr_data *xattr_value;
+	int xattr_len;
+	const char *violation;
+};
+
+/* IMA template field data definition */
+struct ima_field_data {
+	u8 *data;
+	u32 len;
+};
+
+/* IMA template field definition */
+struct ima_template_field {
+	const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
+	int (*field_init)(struct ima_event_data *event_data,
+			  struct ima_field_data *field_data);
+	void (*field_show)(struct seq_file *m, enum ima_show_type show,
+			   struct ima_field_data *field_data);
+};
+
+/* IMA template descriptor definition */
+struct ima_template_desc {
+	char *name;
+	char *fmt;
+	int num_fields;
+	struct ima_template_field **fields;
+};
+
+struct ima_template_entry {
+	u8 digest[TPM_DIGEST_SIZE];	/* sha1 or md5 measurement hash */
+	struct ima_template_desc *template_desc; /* template descriptor */
+	u32 template_data_len;
+	struct ima_field_data template_data[0];	/* template related data */
+};
+
+struct ima_queue_entry {
+	struct hlist_node hnext;	/* place in hash collision list */
+	struct list_head later;		/* place in ima_measurements list */
+	struct ima_template_entry *entry;
+};
+extern struct list_head ima_measurements;	/* list of all measurements */
+
+/* Internal IMA function definitions */
+int ima_init(void);
+int ima_fs_init(void);
+int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+			   const char *op, struct inode *inode,
+			   const unsigned char *filename);
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+			      struct ima_template_desc *desc, int num_fields,
+			      struct ima_digest_data *hash);
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
+void ima_add_violation(struct file *file, const unsigned char *filename,
+		       struct integrity_iint_cache *iint,
+		       const char *op, const char *cause);
+int ima_init_crypto(void);
+void ima_putc(struct seq_file *m, void *data, int datalen);
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
+struct ima_template_desc *ima_template_desc_current(void);
+int ima_init_template(void);
+
+/*
+ * used to protect h_table and sha_table
+ */
+extern spinlock_t ima_queue_lock;
+
+struct ima_h_table {
+	atomic_long_t len;	/* number of stored measurements in the list */
+	atomic_long_t violations;
+	struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
+};
+extern struct ima_h_table ima_htable;
+
+static inline unsigned long ima_hash_key(u8 *digest)
+{
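+	/* only the first byte of the digest selects the hash bucket */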
+	return hash_long(*digest, IMA_HASH_BITS);
+}
+
+/* LIM API function definitions */
+int ima_get_action(struct inode *inode, int mask, int function);
+int ima_must_measure(struct inode *inode, int mask, int function);
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+			    struct file *file,
+			    struct evm_ima_xattr_data **xattr_value,
+			    int *xattr_len);
+void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
+			   const unsigned char *filename,
+			   struct evm_ima_xattr_data *xattr_value,
+			   int xattr_len);
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+			   const unsigned char *filename);
+int ima_alloc_init_template(struct ima_event_data *event_data,
+			    struct ima_template_entry **entry);
+int ima_store_template(struct ima_template_entry *entry, int violation,
+		       struct inode *inode, const unsigned char *filename);
+void ima_free_template_entry(struct ima_template_entry *entry);
+const char *ima_d_path(struct path *path, char **pathbuf);
+
+/* IMA policy related functions */
+enum ima_hooks { FILE_CHECK = 1, MMAP_CHECK, BPRM_CHECK, MODULE_CHECK, FIRMWARE_CHECK, POST_SETATTR };
+
+int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+		     int flags);
+void ima_init_policy(void);
+void ima_update_policy(void);
+void ima_update_policy_flag(void);
+ssize_t ima_parse_add_rule(char *);
+void ima_delete_rules(void);
+
+/* Appraise integrity measurements */
+#define IMA_APPRAISE_ENFORCE	0x01
+#define IMA_APPRAISE_FIX	0x02
+#define IMA_APPRAISE_LOG	0x04
+#define IMA_APPRAISE_MODULES	0x08
+#define IMA_APPRAISE_FIRMWARE	0x10
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+			     struct file *file, const unsigned char *filename,
+			     struct evm_ima_xattr_data *xattr_value,
+			     int xattr_len, int opened);
+int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+					   int func);
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+		       struct ima_digest_data *hash);
+int ima_read_xattr(struct dentry *dentry,
+		   struct evm_ima_xattr_data **xattr_value);
+
+#else
+static inline int ima_appraise_measurement(int func,
+					   struct integrity_iint_cache *iint,
+					   struct file *file,
+					   const unsigned char *filename,
+					   struct evm_ima_xattr_data *xattr_value,
+					   int xattr_len, int opened)
+{
+	return INTEGRITY_UNKNOWN;
+}
+
+static inline int ima_must_appraise(struct inode *inode, int mask,
+				    enum ima_hooks func)
+{
+	return 0;
+}
+
+static inline void ima_update_xattr(struct integrity_iint_cache *iint,
+				    struct file *file)
+{
+}
+
+static inline enum integrity_status ima_get_cache_status(struct integrity_iint_cache
+							 *iint, int func)
+{
+	return INTEGRITY_UNKNOWN;
+}
+
+static inline void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
+				     int xattr_len,
+				     struct ima_digest_data *hash)
+{
+}
+
+static inline int ima_read_xattr(struct dentry *dentry,
+				 struct evm_ima_xattr_data **xattr_value)
+{
+	return 0;
+}
+
+#endif
+
+/* LSM based policy rules require audit */
+#ifdef CONFIG_IMA_LSM_RULES
+
+#define security_filter_rule_init security_audit_rule_init
+#define security_filter_rule_match security_audit_rule_match
+
+#else
+
+static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr,
+					    void **lsmrule)
+{
+	return -EINVAL;
+}
+
+static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
+					     void *lsmrule,
+					     struct audit_context *actx)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_IMA_LSM_RULES */
+
+#ifdef CONFIG_IMA_TRUSTED_KEYRING
+static inline int ima_init_keyring(const unsigned int id)
+{
+	return integrity_init_keyring(id);
+}
+#else
+static inline int ima_init_keyring(const unsigned int id)
+{
+	return 0;
+}
+#endif /* CONFIG_IMA_TRUSTED_KEYRING */
+#endif
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
new file mode 100644
index 0000000..2d1fe34
--- /dev/null
+++ b/security/integrity/ima/ima_api.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_api.c
+ *	Implements must_appraise_or_measure, collect_measurement,
+ *	appraise_measurement, store_measurement and store_template.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+#include <crypto/hash_info.h>
+#include "ima.h"
+
+/*
+ * ima_free_template_entry - free an existing template entry
+ */
+void ima_free_template_entry(struct ima_template_entry *entry)
+{
+	int i;
+
+	for (i = 0; i < entry->template_desc->num_fields; i++)
+		kfree(entry->template_data[i].data);
+
+	kfree(entry);
+}
+
+/*
+ * ima_alloc_init_template - create and initialize a new template entry
+ */
+int ima_alloc_init_template(struct ima_event_data *event_data,
+			    struct ima_template_entry **entry)
+{
+	struct ima_template_desc *template_desc = ima_template_desc_current();
+	int i, result = 0;
+
+	*entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
+			 sizeof(struct ima_field_data), GFP_NOFS);
+	if (!*entry)
+		return -ENOMEM;
+
+	(*entry)->template_desc = template_desc;
+	for (i = 0; i < template_desc->num_fields; i++) {
+		struct ima_template_field *field = template_desc->fields[i];
+		u32 len;
+
+		result = field->field_init(event_data,
+					   &((*entry)->template_data[i]));
+		if (result != 0)
+			goto out;
+
+		len = (*entry)->template_data[i].len;
+		(*entry)->template_data_len += sizeof(len);
+		(*entry)->template_data_len += len;
+	}
+	return 0;
+out:
+	ima_free_template_entry(*entry);
+	*entry = NULL;
+	return result;
+}
+
+/*
+ * ima_store_template - store ima template measurements
+ *
+ * Calculate the hash of a template entry, add the template entry
+ * to an ordered list of measurement entries maintained inside the kernel,
+ * and also update the aggregate integrity value (maintained inside the
+ * configured TPM PCR) over the hashes of the current list of measurement
+ * entries.
+ *
+ * Applications retrieve the current kernel-held measurement list through
+ * the securityfs entries in /sys/kernel/security/ima. The signed aggregate
+ * TPM PCR (called quote) can be retrieved using a TPM user space library
+ * and is used to validate the measurement list.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int ima_store_template(struct ima_template_entry *entry,
+		       int violation, struct inode *inode,
+		       const unsigned char *filename)
+{
+	static const char op[] = "add_template_measure";
+	static const char audit_cause[] = "hashing_error";
+	char *template_name = entry->template_desc->name;
+	int result;
+	struct {
+		struct ima_digest_data hdr;
+		char digest[TPM_DIGEST_SIZE];
+	} hash;
+
+	if (!violation) {
+		int num_fields = entry->template_desc->num_fields;
+
+		/* this function uses default algo */
+		hash.hdr.algo = HASH_ALGO_SHA1;
+		result = ima_calc_field_array_hash(&entry->template_data[0],
+						   entry->template_desc,
+						   num_fields, &hash.hdr);
+		if (result < 0) {
+			integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
+					    template_name, op,
+					    audit_cause, result, 0);
+			return result;
+		}
+		memcpy(entry->digest, hash.hdr.digest, hash.hdr.length);
+	}
+	result = ima_add_template_entry(entry, violation, op, inode, filename);
+	return result;
+}
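+
+/*
+ * For reference: the resulting measurement list is exported through
+ * securityfs and can be inspected from user space, e.g.:
+ *
+ *	cat /sys/kernel/security/ima/ascii_runtime_measurements
+ */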
+
+/*
+ * ima_add_violation - add violation to measurement list.
+ *
+ * Violations are flagged in the measurement list with zero hash values.
+ * By extending the PCR with 0xFF's instead of with zeroes, the PCR
+ * value is invalidated.
+ */
+void ima_add_violation(struct file *file, const unsigned char *filename,
+		       struct integrity_iint_cache *iint,
+		       const char *op, const char *cause)
+{
+	struct ima_template_entry *entry;
+	struct inode *inode = file_inode(file);
+	struct ima_event_data event_data = {iint, file, filename, NULL, 0,
+					    cause};
+	int violation = 1;
+	int result;
+
+	/* can overflow, only indicator */
+	atomic_long_inc(&ima_htable.violations);
+
+	result = ima_alloc_init_template(&event_data, &entry);
+	if (result < 0) {
+		result = -ENOMEM;
+		goto err_out;
+	}
+	result = ima_store_template(entry, violation, inode, filename);
+	if (result < 0)
+		ima_free_template_entry(entry);
+err_out:
+	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+			    op, cause, result, 0);
+}
+
+/**
+ * ima_get_action - appraise & measure decision based on policy.
+ * @inode: pointer to inode to measure
+ * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
+ * @function: calling function (FILE_CHECK, BPRM_CHECK, MMAP_CHECK, MODULE_CHECK)
+ *
+ * The policy is defined in terms of keypairs:
+ *		subj=, obj=, type=, func=, mask=, fsmagic=
+ *	subj,obj, and type: are LSM specific.
+ *	func: FILE_CHECK | BPRM_CHECK | MMAP_CHECK | MODULE_CHECK
+ *	mask: contains the permission mask
+ *	fsmagic: hex value
+ *
+ * Returns IMA_MEASURE, IMA_APPRAISE mask.
+ *
+ */
+int ima_get_action(struct inode *inode, int mask, int function)
+{
+	int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
+
+	flags &= ima_policy_flag;
+
+	return ima_match_policy(inode, function, mask, flags);
+}
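+
+/*
+ * Example policy rule (illustrative, in the ima_policy syntax) that
+ * makes this function return IMA_MEASURE for executed files:
+ *
+ *	measure func=BPRM_CHECK mask=MAY_EXEC
+ */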
+
+/*
+ * ima_collect_measurement - collect file measurement
+ *
+ * Calculate the file hash, if it doesn't already exist,
+ * storing the measurement and i_version in the iint.
+ *
+ * Must be called with iint->mutex held.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+			    struct file *file,
+			    struct evm_ima_xattr_data **xattr_value,
+			    int *xattr_len)
+{
+	const char *audit_cause = "failed";
+	struct inode *inode = file_inode(file);
+	const char *filename = file->f_path.dentry->d_name.name;
+	int result = 0;
+	struct {
+		struct ima_digest_data hdr;
+		char digest[IMA_MAX_DIGEST_SIZE];
+	} hash;
+
+	if (xattr_value)
+		*xattr_len = ima_read_xattr(file_dentry(file), xattr_value);
+
+	if (!(iint->flags & IMA_COLLECTED)) {
+		u64 i_version = file_inode(file)->i_version;
+
+		if (file->f_flags & O_DIRECT) {
+			audit_cause = "failed(directio)";
+			result = -EACCES;
+			goto out;
+		}
+
+		/* use default hash algorithm */
+		hash.hdr.algo = ima_hash_algo;
+
+		if (xattr_value)
+			ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr);
+
+		result = ima_calc_file_hash(file, &hash.hdr);
+		if (!result) {
+			int length = sizeof(hash.hdr) + hash.hdr.length;
+			void *tmpbuf = krealloc(iint->ima_hash, length,
+						GFP_NOFS);
+			if (tmpbuf) {
+				iint->ima_hash = tmpbuf;
+				memcpy(iint->ima_hash, &hash, length);
+				iint->version = i_version;
+				iint->flags |= IMA_COLLECTED;
+			} else
+				result = -ENOMEM;
+		}
+	}
+out:
+	if (result)
+		integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+				    filename, "collect_data", audit_cause,
+				    result, 0);
+	return result;
+}
+
+/*
+ * ima_store_measurement - store file measurement
+ *
+ * Create an "ima" template and then store the template by calling
+ * ima_store_template.
+ *
+ * We only get here if the inode has not already been measured,
+ * but the measurement could already exist:
+ *	- multiple copies of the same file on either the same or
+ *	  different filesystems.
+ *	- the inode was previously flushed as well as the iint info,
+ *	  containing the hashing info.
+ *
+ * Must be called with iint->mutex held.
+ */
+void ima_store_measurement(struct integrity_iint_cache *iint,
+			   struct file *file, const unsigned char *filename,
+			   struct evm_ima_xattr_data *xattr_value,
+			   int xattr_len)
+{
+	static const char op[] = "add_template_measure";
+	static const char audit_cause[] = "ENOMEM";
+	int result = -ENOMEM;
+	struct inode *inode = file_inode(file);
+	struct ima_template_entry *entry;
+	struct ima_event_data event_data = {iint, file, filename, xattr_value,
+					    xattr_len, NULL};
+	int violation = 0;
+
+	if (iint->flags & IMA_MEASURED)
+		return;
+
+	result = ima_alloc_init_template(&event_data, &entry);
+	if (result < 0) {
+		integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+				    op, audit_cause, result, 0);
+		return;
+	}
+
+	result = ima_store_template(entry, violation, inode, filename);
+	if (!result || result == -EEXIST)
+		iint->flags |= IMA_MEASURED;
+	if (result < 0)
+		ima_free_template_entry(entry);
+}
+
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+			   const unsigned char *filename)
+{
+	struct audit_buffer *ab;
+	char hash[(iint->ima_hash->length * 2) + 1];
+	const char *algo_name = hash_algo_name[iint->ima_hash->algo];
+	char algo_hash[sizeof(hash) + strlen(algo_name) + 2];
+	int i;
+
+	if (iint->flags & IMA_AUDITED)
+		return;
+
+	for (i = 0; i < iint->ima_hash->length; i++)
+		hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
+	hash[i * 2] = '\0';
+
+	ab = audit_log_start(current->audit_context, GFP_KERNEL,
+			     AUDIT_INTEGRITY_RULE);
+	if (!ab)
+		return;
+
+	audit_log_format(ab, "file=");
+	audit_log_untrustedstring(ab, filename);
+	audit_log_format(ab, " hash=");
+	snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash);
+	audit_log_untrustedstring(ab, algo_hash);
+
+	audit_log_task_info(ab, current);
+	audit_log_end(ab);
+
+	iint->flags |= IMA_AUDITED;
+}
+
+const char *ima_d_path(struct path *path, char **pathbuf)
+{
+	char *pathname = NULL;
+
+	*pathbuf = __getname();
+	if (*pathbuf) {
+		pathname = d_absolute_path(path, *pathbuf, PATH_MAX);
+		if (IS_ERR(pathname)) {
+			__putname(*pathbuf);
+			*pathbuf = NULL;
+			pathname = NULL;
+		}
+	}
+	return pathname ?: (const char *)path->dentry->d_name.name;
+}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
new file mode 100644
index 0000000..9ce9d50
--- /dev/null
+++ b/security/integrity/ima/ima_appraise.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/magic.h>
+#include <linux/ima.h>
+#include <linux/evm.h>
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+
+static int __init default_appraise_setup(char *str)
+{
+	if (strncmp(str, "off", 3) == 0)
+		ima_appraise = 0;
+	else if (strncmp(str, "log", 3) == 0)
+		ima_appraise = IMA_APPRAISE_LOG;
+	else if (strncmp(str, "fix", 3) == 0)
+		ima_appraise = IMA_APPRAISE_FIX;
+	return 1;
+}
+
+__setup("ima_appraise=", default_appraise_setup);
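+
+/*
+ * For example, booting with "ima_appraise=fix" allows a missing or
+ * invalid 'security.ima' hash to be fixed up on access via
+ * ima_fix_xattr(), instead of being enforced.
+ */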
+
+/*
+ * ima_must_appraise - set appraise flag
+ *
+ * Return 1 to appraise
+ */
+int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
+{
+	if (!ima_appraise)
+		return 0;
+
+	return ima_match_policy(inode, func, mask, IMA_APPRAISE);
+}
+
+static int ima_fix_xattr(struct dentry *dentry,
+			 struct integrity_iint_cache *iint)
+{
+	int rc, offset;
+	u8 algo = iint->ima_hash->algo;
+
+	if (algo <= HASH_ALGO_SHA1) {
+		offset = 1;
+		iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
+	} else {
+		offset = 0;
+		iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
+		iint->ima_hash->xattr.ng.algo = algo;
+	}
+	rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
+				   &iint->ima_hash->xattr.data[offset],
+				   (sizeof(iint->ima_hash->xattr) - offset) +
+				   iint->ima_hash->length, 0);
+	return rc;
+}
+
+/* Return specific func appraised cached result */
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+					   int func)
+{
+	switch (func) {
+	case MMAP_CHECK:
+		return iint->ima_mmap_status;
+	case BPRM_CHECK:
+		return iint->ima_bprm_status;
+	case MODULE_CHECK:
+		return iint->ima_module_status;
+	case FIRMWARE_CHECK:
+		return iint->ima_firmware_status;
+	case FILE_CHECK:
+	default:
+		return iint->ima_file_status;
+	}
+}
+
+static void ima_set_cache_status(struct integrity_iint_cache *iint,
+				 int func, enum integrity_status status)
+{
+	switch (func) {
+	case MMAP_CHECK:
+		iint->ima_mmap_status = status;
+		break;
+	case BPRM_CHECK:
+		iint->ima_bprm_status = status;
+		break;
+	case MODULE_CHECK:
+		iint->ima_module_status = status;
+		break;
+	case FIRMWARE_CHECK:
+		iint->ima_firmware_status = status;
+		break;
+	case FILE_CHECK:
+	default:
+		iint->ima_file_status = status;
+		break;
+	}
+}
+
+static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
+{
+	switch (func) {
+	case MMAP_CHECK:
+		iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
+		break;
+	case BPRM_CHECK:
+		iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED);
+		break;
+	case MODULE_CHECK:
+		iint->flags |= (IMA_MODULE_APPRAISED | IMA_APPRAISED);
+		break;
+	case FIRMWARE_CHECK:
+		iint->flags |= (IMA_FIRMWARE_APPRAISED | IMA_APPRAISED);
+		break;
+	case FILE_CHECK:
+	default:
+		iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED);
+		break;
+	}
+}
+
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+		       struct ima_digest_data *hash)
+{
+	struct signature_v2_hdr *sig;
+
+	if (!xattr_value || xattr_len < 2)
+		return;
+
+	switch (xattr_value->type) {
+	case EVM_IMA_XATTR_DIGSIG:
+		sig = (typeof(sig))xattr_value;
+		if (sig->version != 2 || xattr_len <= sizeof(*sig))
+			return;
+		hash->algo = sig->hash_algo;
+		break;
+	case IMA_XATTR_DIGEST_NG:
+		hash->algo = xattr_value->digest[0];
+		break;
+	case IMA_XATTR_DIGEST:
+		/* this is for backward compatibility */
+		if (xattr_len == 21) {
+			unsigned int zero = 0;
+			if (!memcmp(&xattr_value->digest[16], &zero, 4))
+				hash->algo = HASH_ALGO_MD5;
+			else
+				hash->algo = HASH_ALGO_SHA1;
+		} else if (xattr_len == 17)
+			hash->algo = HASH_ALGO_MD5;
+		break;
+	}
+}
+
+int ima_read_xattr(struct dentry *dentry,
+		   struct evm_ima_xattr_data **xattr_value)
+{
+	struct inode *inode = d_backing_inode(dentry);
+
+	if (!inode->i_op->getxattr)
+		return 0;
+
+	return vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
+				  0, GFP_NOFS);
+}
+
+/*
+ * ima_appraise_measurement - appraise file measurement
+ *
+ * Call evm_verifyxattr() to verify the integrity of 'security.ima'.
+ * Assuming success, compare the xattr hash with the collected measurement.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+			     struct file *file, const unsigned char *filename,
+			     struct evm_ima_xattr_data *xattr_value,
+			     int xattr_len, int opened)
+{
+	static const char op[] = "appraise_data";
+	char *cause = "unknown";
+	struct dentry *dentry = file_dentry(file);
+	struct inode *inode = d_backing_inode(dentry);
+	enum integrity_status status = INTEGRITY_UNKNOWN;
+	int rc = xattr_len, hash_start = 0;
+
+	if (!inode->i_op->getxattr)
+		return INTEGRITY_UNKNOWN;
+
+	if (rc <= 0) {
+		if (rc && rc != -ENODATA)
+			goto out;
+
+		cause = "missing-hash";
+		status = INTEGRITY_NOLABEL;
+		if (opened & FILE_CREATED)
+			iint->flags |= IMA_NEW_FILE;
+		if ((iint->flags & IMA_NEW_FILE) &&
+		    !(iint->flags & IMA_DIGSIG_REQUIRED))
+			status = INTEGRITY_PASS;
+		goto out;
+	}
+
+	status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
+	if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
+		if ((status == INTEGRITY_NOLABEL)
+		    || (status == INTEGRITY_NOXATTRS))
+			cause = "missing-HMAC";
+		else if (status == INTEGRITY_FAIL)
+			cause = "invalid-HMAC";
+		goto out;
+	}
+	switch (xattr_value->type) {
+	case IMA_XATTR_DIGEST_NG:
+		/* first byte contains algorithm id */
+		hash_start = 1;
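+		/* fall through */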
+	case IMA_XATTR_DIGEST:
+		if (iint->flags & IMA_DIGSIG_REQUIRED) {
+			cause = "IMA-signature-required";
+			status = INTEGRITY_FAIL;
+			break;
+		}
+		if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+				iint->ima_hash->length)
+			/*
+			 * xattr length may be longer. The md5 hash in a
+			 * previous version occupied 20 bytes in the xattr,
+			 * instead of 16.
+			 */
+			rc = memcmp(&xattr_value->digest[hash_start],
+				    iint->ima_hash->digest,
+				    iint->ima_hash->length);
+		else
+			rc = -EINVAL;
+		if (rc) {
+			cause = "invalid-hash";
+			status = INTEGRITY_FAIL;
+			break;
+		}
+		status = INTEGRITY_PASS;
+		break;
+	case EVM_IMA_XATTR_DIGSIG:
+		iint->flags |= IMA_DIGSIG;
+		rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+					     (const char *)xattr_value, rc,
+					     iint->ima_hash->digest,
+					     iint->ima_hash->length);
+		if (rc == -EOPNOTSUPP) {
+			status = INTEGRITY_UNKNOWN;
+		} else if (rc) {
+			cause = "invalid-signature";
+			status = INTEGRITY_FAIL;
+		} else {
+			status = INTEGRITY_PASS;
+		}
+		break;
+	default:
+		status = INTEGRITY_UNKNOWN;
+		cause = "unknown-ima-data";
+		break;
+	}
+
+out:
+	if (status != INTEGRITY_PASS) {
+		if ((ima_appraise & IMA_APPRAISE_FIX) &&
+		    (!xattr_value ||
+		     xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
+			if (!ima_fix_xattr(dentry, iint))
+				status = INTEGRITY_PASS;
+		}
+		integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+				    op, cause, rc, 0);
+	} else {
+		ima_cache_flags(iint, func);
+	}
+	ima_set_cache_status(iint, func, status);
+	return status;
+}
+
+/*
+ * ima_update_xattr - update 'security.ima' hash value
+ */
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+{
+	struct dentry *dentry = file_dentry(file);
+	int rc = 0;
+
+	/* do not collect and update hash for digital signatures */
+	if (iint->flags & IMA_DIGSIG)
+		return;
+
+	rc = ima_collect_measurement(iint, file, NULL, NULL);
+	if (rc < 0)
+		return;
+
+	ima_fix_xattr(dentry, iint);
+}
+
+/**
+ * ima_inode_post_setattr - reflect file metadata changes
+ * @dentry: pointer to the affected dentry
+ *
+ * Changes to a dentry's metadata might result in needing to appraise.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void ima_inode_post_setattr(struct dentry *dentry)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	struct integrity_iint_cache *iint;
+	int must_appraise, rc;
+
+	if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)
+	    || !inode->i_op->removexattr)
+		return;
+
+	must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+	iint = integrity_iint_find(inode);
+	if (iint) {
+		iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+				 IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+				 IMA_ACTION_FLAGS);
+		if (must_appraise)
+			iint->flags |= IMA_APPRAISE;
+	}
+	if (!must_appraise)
+		rc = inode->i_op->removexattr(dentry, XATTR_NAME_IMA);
+	return;
+}
+
+/*
+ * ima_protect_xattr - protect 'security.ima'
+ *
+ * Ensure that not just anyone can modify or remove 'security.ima'.
+ */
+static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
+			     const void *xattr_value, size_t xattr_value_len)
+{
+	if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		return 1;
+	}
+	return 0;
+}
+
+static void ima_reset_appraise_flags(struct inode *inode, int digsig)
+{
+	struct integrity_iint_cache *iint;
+
+	if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode))
+		return;
+
+	iint = integrity_iint_find(inode);
+	if (!iint)
+		return;
+
+	iint->flags &= ~IMA_DONE_MASK;
+	if (digsig)
+		iint->flags |= IMA_DIGSIG;
+	return;
+}
+
+int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+		       const void *xattr_value, size_t xattr_value_len)
+{
+	const struct evm_ima_xattr_data *xvalue = xattr_value;
+	int result;
+
+	result = ima_protect_xattr(dentry, xattr_name, xattr_value,
+				   xattr_value_len);
+	if (result == 1) {
+		bool digsig;
+
+		if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
+			return -EINVAL;
+		digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
+		if (!digsig && (ima_appraise & IMA_APPRAISE_ENFORCE))
+			return -EPERM;
+		ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
+		result = 0;
+	}
+	return result;
+}
+
+int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+	int result;
+
+	result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
+	if (result == 1) {
+		ima_reset_appraise_flags(d_backing_inode(dentry), 0);
+		result = 0;
+	}
+	return result;
+}
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
new file mode 100644
index 0000000..6eb6293
--- /dev/null
+++ b/security/integrity/ima/ima_crypto.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: ima_crypto.c
+ *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/ratelimit.h>
+#include <linux/file.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <crypto/hash.h>
+#include <crypto/hash_info.h>
+#include "ima.h"
+
+struct ahash_completion {
+	struct completion completion;
+	int err;
+};
+
+/* minimum file size for ahash use */
+static unsigned long ima_ahash_minsize;
+module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
+MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");
+
+/* default is 0 - 1 page. */
+static int ima_maxorder;
+static unsigned int ima_bufsize = PAGE_SIZE;
+
+static int param_set_bufsize(const char *val, const struct kernel_param *kp)
+{
+	unsigned long long size;
+	int order;
+
+	size = memparse(val, NULL);
+	order = get_order(size);
+	if (order >= MAX_ORDER)
+		return -EINVAL;
+	ima_maxorder = order;
+	ima_bufsize = PAGE_SIZE << order;
+	return 0;
+}
+
+static const struct kernel_param_ops param_ops_bufsize = {
+	.set = param_set_bufsize,
+	.get = param_get_uint,
+};
+#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)
+
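+/*
+ * For example, with 4 KiB pages, "ima.ahash_bufsize=32768" on the kernel
+ * command line selects an order-3 (eight page) read buffer.
+ */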
+module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
+MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
+
+static struct crypto_shash *ima_shash_tfm;
+static struct crypto_ahash *ima_ahash_tfm;
+
+int __init ima_init_crypto(void)
+{
+	long rc;
+
+	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
+	if (IS_ERR(ima_shash_tfm)) {
+		rc = PTR_ERR(ima_shash_tfm);
+		pr_err("Can not allocate %s (reason: %ld)\n",
+		       hash_algo_name[ima_hash_algo], rc);
+		return rc;
+	}
+	return 0;
+}
+
+static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
+{
+	struct crypto_shash *tfm = ima_shash_tfm;
+	int rc;
+
+	if (algo < 0 || algo >= HASH_ALGO__LAST)
+		algo = ima_hash_algo;
+
+	if (algo != ima_hash_algo) {
+		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+		if (IS_ERR(tfm)) {
+			rc = PTR_ERR(tfm);
+			pr_err("Can not allocate %s (reason: %d)\n",
+			       hash_algo_name[algo], rc);
+		}
+	}
+	return tfm;
+}
+
+static void ima_free_tfm(struct crypto_shash *tfm)
+{
+	if (tfm != ima_shash_tfm)
+		crypto_free_shash(tfm);
+}
+
+/**
+ * ima_alloc_pages() - Allocate contiguous pages.
+ * @max_size:       Maximum amount of memory to allocate.
+ * @allocated_size: Returned size of actual allocation.
+ * @last_warn:      Should the min_size allocation warn or not.
+ *
+ * Tries an opportunistic allocation: first attempt to allocate max_size
+ * bytes, then halve the request until a zero-order (single page)
+ * allocation is reached.  Allocations are attempted without generating
+ * allocation warnings unless last_warn is set; last_warn affects only
+ * the final zero-order allocation.
+ *
+ * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL)
+ *
+ * Return pointer to allocated memory, or NULL on failure.
+ */
+static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
+			     int last_warn)
+{
+	void *ptr;
+	int order = ima_maxorder;
+	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
+
+	if (order)
+		order = min(get_order(max_size), order);
+
+	for (; order; order--) {
+		ptr = (void *)__get_free_pages(gfp_mask, order);
+		if (ptr) {
+			*allocated_size = PAGE_SIZE << order;
+			return ptr;
+		}
+	}
+
+	/* order is zero - one page */
+
+	gfp_mask = GFP_KERNEL;
+
+	if (!last_warn)
+		gfp_mask |= __GFP_NOWARN;
+
+	ptr = (void *)__get_free_pages(gfp_mask, 0);
+	if (ptr) {
+		*allocated_size = PAGE_SIZE;
+		return ptr;
+	}
+
+	*allocated_size = 0;
+	return NULL;
+}
+
+/**
+ * ima_free_pages() - Free pages allocated by ima_alloc_pages().
+ * @ptr:  Pointer to allocated pages.
+ * @size: Size of allocated buffer.
+ */
+static void ima_free_pages(void *ptr, size_t size)
+{
+	if (!ptr)
+		return;
+	free_pages((unsigned long)ptr, get_order(size));
+}
+
+static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
+{
+	struct crypto_ahash *tfm = ima_ahash_tfm;
+	int rc;
+
+	if (algo < 0 || algo >= HASH_ALGO__LAST)
+		algo = ima_hash_algo;
+
+	if (algo != ima_hash_algo || !tfm) {
+		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
+		if (!IS_ERR(tfm)) {
+			if (algo == ima_hash_algo)
+				ima_ahash_tfm = tfm;
+		} else {
+			rc = PTR_ERR(tfm);
+			pr_err("Can not allocate %s (reason: %d)\n",
+			       hash_algo_name[algo], rc);
+		}
+	}
+	return tfm;
+}
+
+static void ima_free_atfm(struct crypto_ahash *tfm)
+{
+	if (tfm != ima_ahash_tfm)
+		crypto_free_ahash(tfm);
+}
+
+static void ahash_complete(struct crypto_async_request *req, int err)
+{
+	struct ahash_completion *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	res->err = err;
+	complete(&res->completion);
+}
+
+static int ahash_wait(int err, struct ahash_completion *res)
+{
+	switch (err) {
+	case 0:
+		break;
+	case -EINPROGRESS:
+	case -EBUSY:
+		wait_for_completion(&res->completion);
+		reinit_completion(&res->completion);
+		err = res->err;
+		/* fall through */
+	default:
+		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
+	}
+
+	return err;
+}
+
+static int ima_calc_file_hash_atfm(struct file *file,
+				   struct ima_digest_data *hash,
+				   struct crypto_ahash *tfm)
+{
+	loff_t i_size, offset;
+	char *rbuf[2] = { NULL, };
+	int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
+	struct ahash_request *req;
+	struct scatterlist sg[1];
+	struct ahash_completion res;
+	size_t rbuf_size[2];
+
+	hash->length = crypto_ahash_digestsize(tfm);
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	init_completion(&res.completion);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+				   CRYPTO_TFM_REQ_MAY_SLEEP,
+				   ahash_complete, &res);
+
+	rc = ahash_wait(crypto_ahash_init(req), &res);
+	if (rc)
+		goto out1;
+
+	i_size = i_size_read(file_inode(file));
+
+	if (i_size == 0)
+		goto out2;
+
+	/*
+	 * Try to allocate maximum size of memory.
+	 * Fail if even a single page cannot be allocated.
+	 */
+	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
+	if (!rbuf[0]) {
+		rc = -ENOMEM;
+		goto out1;
+	}
+
+	/* Allocate a second buffer only if the first cannot hold the file. */
+	if (i_size > rbuf_size[0]) {
+		/*
+		 * Try to allocate secondary buffer. If that fails fallback to
+		 * using single buffering. Use previous memory allocation size
+		 * as baseline for possible allocation size.
+		 */
+		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
+					  &rbuf_size[1], 0);
+	}
+
+	if (!(file->f_mode & FMODE_READ)) {
+		file->f_mode |= FMODE_READ;
+		read = 1;
+	}
+
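+	/*
+	 * Hash the file in rbuf_size chunks.  With two buffers, the read
+	 * into one buffer can overlap the in-flight ahash_update() on the
+	 * other; with a single buffer, each update must complete before
+	 * the next read.
+	 */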
+	for (offset = 0; offset < i_size; offset += rbuf_len) {
+		if (!rbuf[1] && offset) {
+			/* Not using two buffers, and it is not the first
+			 * read/request, wait for the completion of the
+			 * previous ahash_update() request.
+			 */
+			rc = ahash_wait(ahash_rc, &res);
+			if (rc)
+				goto out3;
+		}
+		/* read buffer */
+		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
+		rc = integrity_kernel_read(file, offset, rbuf[active],
+					   rbuf_len);
+		if (rc != rbuf_len)
+			goto out3;
+
+		if (rbuf[1] && offset) {
+			/* Using two buffers, and it is not the first
+			 * read/request, wait for the completion of the
+			 * previous ahash_update() request.
+			 */
+			rc = ahash_wait(ahash_rc, &res);
+			if (rc)
+				goto out3;
+		}
+
+		sg_init_one(&sg[0], rbuf[active], rbuf_len);
+		ahash_request_set_crypt(req, sg, NULL, rbuf_len);
+
+		ahash_rc = crypto_ahash_update(req);
+
+		if (rbuf[1])
+			active = !active; /* swap buffers, if we use two */
+	}
+	/* wait for the last update request to complete */
+	rc = ahash_wait(ahash_rc, &res);
+out3:
+	if (read)
+		file->f_mode &= ~FMODE_READ;
+	ima_free_pages(rbuf[0], rbuf_size[0]);
+	ima_free_pages(rbuf[1], rbuf_size[1]);
+out2:
+	if (!rc) {
+		ahash_request_set_crypt(req, NULL, hash->digest, 0);
+		rc = ahash_wait(crypto_ahash_final(req), &res);
+	}
+out1:
+	ahash_request_free(req);
+	return rc;
+}
+
+static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
+{
+	struct crypto_ahash *tfm;
+	int rc;
+
+	tfm = ima_alloc_atfm(hash->algo);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	rc = ima_calc_file_hash_atfm(file, hash, tfm);
+
+	ima_free_atfm(tfm);
+
+	return rc;
+}
+
+static int ima_calc_file_hash_tfm(struct file *file,
+				  struct ima_digest_data *hash,
+				  struct crypto_shash *tfm)
+{
+	loff_t i_size, offset = 0;
+	char *rbuf;
+	int rc, read = 0;
+	SHASH_DESC_ON_STACK(shash, tfm);
+
+	shash->tfm = tfm;
+	shash->flags = 0;
+
+	hash->length = crypto_shash_digestsize(tfm);
+
+	rc = crypto_shash_init(shash);
+	if (rc != 0)
+		return rc;
+
+	i_size = i_size_read(file_inode(file));
+
+	if (i_size == 0)
+		goto out;
+
+	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!rbuf)
+		return -ENOMEM;
+
+	if (!(file->f_mode & FMODE_READ)) {
+		file->f_mode |= FMODE_READ;
+		read = 1;
+	}
+
+	while (offset < i_size) {
+		int rbuf_len;
+
+		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
+		if (rbuf_len < 0) {
+			rc = rbuf_len;
+			break;
+		}
+		if (rbuf_len == 0)
+			break;
+		offset += rbuf_len;
+
+		rc = crypto_shash_update(shash, rbuf, rbuf_len);
+		if (rc)
+			break;
+	}
+	if (read)
+		file->f_mode &= ~FMODE_READ;
+	kfree(rbuf);
+out:
+	if (!rc)
+		rc = crypto_shash_final(shash, hash->digest);
+	return rc;
+}
+
+static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
+{
+	struct crypto_shash *tfm;
+	int rc;
+
+	tfm = ima_alloc_tfm(hash->algo);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	rc = ima_calc_file_hash_tfm(file, hash, tfm);
+
+	ima_free_tfm(tfm);
+
+	return rc;
+}
+
+/*
+ * ima_calc_file_hash - calculate file hash
+ *
+ * Asynchronous hash (ahash) allows using HW acceleration for calculating
+ * a hash. ahash performance varies for different data sizes on different
+ * crypto accelerators. shash performance might be better for smaller files.
+ * The 'ima.ahash_minsize' module parameter allows specifying the best
+ * minimum file size for using ahash on the system.
+ *
+ * If the ima.ahash_minsize parameter is not specified, this function uses
+ * shash for the hash calculation.  If ahash fails, it falls back to using
+ * shash.
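+ *
+ * For example, booting with "ima.ahash_minsize=1048576" routes files of
+ * 1 MiB and larger through the ahash path.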
+ */
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+{
+	loff_t i_size;
+	int rc;
+
+	i_size = i_size_read(file_inode(file));
+
+	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
+		rc = ima_calc_file_ahash(file, hash);
+		if (!rc)
+			return 0;
+	}
+
+	return ima_calc_file_shash(file, hash);
+}
+
+/*
+ * Calculate the hash of template data
+ */
+static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
+					 struct ima_template_desc *td,
+					 int num_fields,
+					 struct ima_digest_data *hash,
+					 struct crypto_shash *tfm)
+{
+	SHASH_DESC_ON_STACK(shash, tfm);
+	int rc, i;
+
+	shash->tfm = tfm;
+	shash->flags = 0;
+
+	hash->length = crypto_shash_digestsize(tfm);
+
+	rc = crypto_shash_init(shash);
+	if (rc != 0)
+		return rc;
+
+	for (i = 0; i < num_fields; i++) {
+		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
+		u8 *data_to_hash = field_data[i].data;
+		u32 datalen = field_data[i].len;
+
+		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+			rc = crypto_shash_update(shash,
+						(const u8 *) &field_data[i].len,
+						sizeof(field_data[i].len));
+			if (rc)
+				break;
+		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
+			memcpy(buffer, data_to_hash, datalen);
+			data_to_hash = buffer;
+			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
+		}
+		rc = crypto_shash_update(shash, data_to_hash, datalen);
+		if (rc)
+			break;
+	}
+
+	if (!rc)
+		rc = crypto_shash_final(shash, hash->digest);
+
+	return rc;
+}
+
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+			      struct ima_template_desc *desc, int num_fields,
+			      struct ima_digest_data *hash)
+{
+	struct crypto_shash *tfm;
+	int rc;
+
+	tfm = ima_alloc_tfm(hash->algo);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
+					   hash, tfm);
+
+	ima_free_tfm(tfm);
+
+	return rc;
+}
+
+static void __init ima_pcrread(int idx, u8 *pcr)
+{
+	if (!ima_used_chip)
+		return;
+
+	if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
+		pr_err("Error Communicating to TPM chip\n");
+}
+
+/*
+ * Calculate the boot aggregate hash
+ */
+static int __init ima_calc_boot_aggregate_tfm(char *digest,
+					      struct crypto_shash *tfm)
+{
+	u8 pcr_i[TPM_DIGEST_SIZE];
+	int rc, i;
+	SHASH_DESC_ON_STACK(shash, tfm);
+
+	shash->tfm = tfm;
+	shash->flags = 0;
+
+	rc = crypto_shash_init(shash);
+	if (rc != 0)
+		return rc;
+
+	/* cumulative sha1 over tpm registers 0-7 */
+	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
+		ima_pcrread(i, pcr_i);
+		/* now accumulate with current aggregate */
+		rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
+	}
+	if (!rc)
+		crypto_shash_final(shash, digest);
+	return rc;
+}
+
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+{
+	struct crypto_shash *tfm;
+	int rc;
+
+	tfm = ima_alloc_tfm(hash->algo);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	hash->length = crypto_shash_digestsize(tfm);
+	rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+
+	ima_free_tfm(tfm);
+
+	return rc;
+}
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
new file mode 100644
index 0000000..816d175
--- /dev/null
+++ b/security/integrity/ima/ima_fs.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Reiner Sailer <sailer@us.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_fs.c
+ *	implements the security file system for reporting the
+ *	current measurement list and IMA statistics
+ */
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/parser.h>
+
+#include "ima.h"
+
+static int valid_policy = 1;
+#define TMPBUFLEN 12
+static ssize_t ima_show_htable_value(char __user *buf, size_t count,
+				     loff_t *ppos, atomic_long_t *val)
+{
+	char tmpbuf[TMPBUFLEN];
+	ssize_t len;
+
+	len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
+}
+
+static ssize_t ima_show_htable_violations(struct file *filp,
+					  char __user *buf,
+					  size_t count, loff_t *ppos)
+{
+	return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
+}
+
+static const struct file_operations ima_htable_violations_ops = {
+	.read = ima_show_htable_violations,
+	.llseek = generic_file_llseek,
+};
+
+static ssize_t ima_show_measurements_count(struct file *filp,
+					   char __user *buf,
+					   size_t count, loff_t *ppos)
+{
+	return ima_show_htable_value(buf, count, ppos, &ima_htable.len);
+
+}
+
+static const struct file_operations ima_measurements_count_ops = {
+	.read = ima_show_measurements_count,
+	.llseek = generic_file_llseek,
+};
+
+/* returns pointer to the ima_queue_entry at *pos, or NULL past the end */
+static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t l = *pos;
+	struct ima_queue_entry *qe;
+
+	/* we need a lock since pos could point beyond last element */
+	rcu_read_lock();
+	list_for_each_entry_rcu(qe, &ima_measurements, later) {
+		if (!l--) {
+			rcu_read_unlock();
+			return qe;
+		}
+	}
+	rcu_read_unlock();
+	return NULL;
+}
+
+static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct ima_queue_entry *qe = v;
+
+	/* the lock protects against concurrent list extension
+	 * when reading beyond the last element
+	 */
+	rcu_read_lock();
+	qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
+	rcu_read_unlock();
+	(*pos)++;
+
+	return (&qe->later == &ima_measurements) ? NULL : qe;
+}
+
+static void ima_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+void ima_putc(struct seq_file *m, void *data, int datalen)
+{
+	while (datalen--)
+		seq_putc(m, *(char *)data++);
+}
+
+/* print format:
+ *       32bit-le=pcr#
+ *       char[20]=template digest
+ *       32bit-le=template name size
+ *       char[n]=template name
+ *       [eventdata length]
+ *       eventdata[n]=template specific data
+ */
+static int ima_measurements_show(struct seq_file *m, void *v)
+{
+	/* the list never shrinks, so we don't need a lock here */
+	struct ima_queue_entry *qe = v;
+	struct ima_template_entry *e;
+	char *template_name;
+	int namelen;
+	u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+	bool is_ima_template = false;
+	int i;
+
+	/* get entry */
+	e = qe->entry;
+	if (e == NULL)
+		return -1;
+
+	template_name = (e->template_desc->name[0] != '\0') ?
+	    e->template_desc->name : e->template_desc->fmt;
+
+	/*
+	 * 1st: PCRIndex
+	 * PCR used is always the same (config option) in
+	 * little-endian format
+	 */
+	ima_putc(m, &pcr, sizeof(pcr));
+
+	/* 2nd: template digest */
+	ima_putc(m, e->digest, TPM_DIGEST_SIZE);
+
+	/* 3rd: template name size */
+	namelen = strlen(template_name);
+	ima_putc(m, &namelen, sizeof(namelen));
+
+	/* 4th:  template name */
+	ima_putc(m, template_name, namelen);
+
+	/* 5th:  template length (except for 'ima' template) */
+	if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
+		is_ima_template = true;
+
+	if (!is_ima_template)
+		ima_putc(m, &e->template_data_len,
+			 sizeof(e->template_data_len));
+
+	/* 6th:  template specific data */
+	for (i = 0; i < e->template_desc->num_fields; i++) {
+		enum ima_show_type show = IMA_SHOW_BINARY;
+		struct ima_template_field *field = e->template_desc->fields[i];
+
+		if (is_ima_template && strcmp(field->field_id, "d") == 0)
+			show = IMA_SHOW_BINARY_NO_FIELD_LEN;
+		if (is_ima_template && strcmp(field->field_id, "n") == 0)
+			show = IMA_SHOW_BINARY_OLD_STRING_FMT;
+		field->field_show(m, show, &e->template_data[i]);
+	}
+	return 0;
+}
+
+static const struct seq_operations ima_measurements_seqops = {
+	.start = ima_measurements_start,
+	.next = ima_measurements_next,
+	.stop = ima_measurements_stop,
+	.show = ima_measurements_show
+};
+
+static int ima_measurements_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ima_measurements_seqops);
+}
+
+static const struct file_operations ima_measurements_ops = {
+	.open = ima_measurements_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
+{
+	u32 i;
+
+	for (i = 0; i < size; i++)
+		seq_printf(m, "%02x", *(digest + i));
+}
+
+/* print in ascii */
+static int ima_ascii_measurements_show(struct seq_file *m, void *v)
+{
+	/* the list never shrinks, so we don't need a lock here */
+	struct ima_queue_entry *qe = v;
+	struct ima_template_entry *e;
+	char *template_name;
+	int i;
+
+	/* get entry */
+	e = qe->entry;
+	if (e == NULL)
+		return -1;
+
+	template_name = (e->template_desc->name[0] != '\0') ?
+	    e->template_desc->name : e->template_desc->fmt;
+
+	/* 1st: PCR used (config option) */
+	seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
+
+	/* 2nd: SHA1 template hash */
+	ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
+
+	/* 3rd:  template name */
+	seq_printf(m, " %s", template_name);
+
+	/* 4th:  template specific data */
+	for (i = 0; i < e->template_desc->num_fields; i++) {
+		seq_puts(m, " ");
+		if (e->template_data[i].len == 0)
+			continue;
+
+		e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
+							&e->template_data[i]);
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static const struct seq_operations ima_ascii_measurements_seqops = {
+	.start = ima_measurements_start,
+	.next = ima_measurements_next,
+	.stop = ima_measurements_stop,
+	.show = ima_ascii_measurements_show
+};
+
+static int ima_ascii_measurements_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ima_ascii_measurements_seqops);
+}
+
+static const struct file_operations ima_ascii_measurements_ops = {
+	.open = ima_ascii_measurements_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
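+/*
+ * ima_write_policy - handle policy rules written to the securityfs
+ * policy file, one complete rule per write, e.g.
+ *	echo "measure func=BPRM_CHECK mask=MAY_EXEC" > <securityfs>/ima/policy
+ */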
+static ssize_t ima_write_policy(struct file *file, const char __user *buf,
+				size_t datalen, loff_t *ppos)
+{
+	char *data = NULL;
+	ssize_t result;
+
+	if (datalen >= PAGE_SIZE)
+		datalen = PAGE_SIZE - 1;
+
+	/* No partial writes. */
+	result = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	result = -ENOMEM;
+	data = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!data)
+		goto out;
+
+	*(data + datalen) = '\0';
+
+	result = -EFAULT;
+	if (copy_from_user(data, buf, datalen))
+		goto out;
+
+	result = ima_parse_add_rule(data);
+out:
+	if (result < 0)
+		valid_policy = 0;
+	kfree(data);
+	return result;
+}
+
+static struct dentry *ima_dir;
+static struct dentry *binary_runtime_measurements;
+static struct dentry *ascii_runtime_measurements;
+static struct dentry *runtime_measurements_count;
+static struct dentry *violations;
+static struct dentry *ima_policy;
+
+enum ima_fs_flags {
+	IMA_FS_BUSY,
+};
+
+static unsigned long ima_fs_flags;
+
+/*
+ * ima_open_policy: serialize access to the policy file
+ */
+static int ima_open_policy(struct inode *inode, struct file *filp)
+{
+	/* No point in being allowed to open it if you aren't going to write */
+	if (!(filp->f_flags & O_WRONLY))
+		return -EACCES;
+	if (test_and_set_bit(IMA_FS_BUSY, &ima_fs_flags))
+		return -EBUSY;
+	return 0;
+}
+
+/*
+ * ima_release_policy - start using the new measure policy rules.
+ *
+ * Initially, ima_measure points to the default policy rules, now
+ * point to the new policy rules, and remove the securityfs policy file,
+ * assuming a valid policy.
+ */
+static int ima_release_policy(struct inode *inode, struct file *file)
+{
+	const char *cause = valid_policy ? "completed" : "failed";
+
+	pr_info("IMA: policy update %s\n", cause);
+	integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
+			    "policy_update", cause, !valid_policy, 0);
+
+	if (!valid_policy) {
+		ima_delete_rules();
+		valid_policy = 1;
+		clear_bit(IMA_FS_BUSY, &ima_fs_flags);
+		return 0;
+	}
+	ima_update_policy();
+	securityfs_remove(ima_policy);
+	ima_policy = NULL;
+	return 0;
+}
+
+static const struct file_operations ima_measure_policy_ops = {
+	.open = ima_open_policy,
+	.write = ima_write_policy,
+	.release = ima_release_policy,
+	.llseek = generic_file_llseek,
+};
+
+int __init ima_fs_init(void)
+{
+	ima_dir = securityfs_create_dir("ima", NULL);
+	if (IS_ERR(ima_dir))
+		return -1;
+
+	binary_runtime_measurements =
+	    securityfs_create_file("binary_runtime_measurements",
+				   S_IRUSR | S_IRGRP, ima_dir, NULL,
+				   &ima_measurements_ops);
+	if (IS_ERR(binary_runtime_measurements))
+		goto out;
+
+	ascii_runtime_measurements =
+	    securityfs_create_file("ascii_runtime_measurements",
+				   S_IRUSR | S_IRGRP, ima_dir, NULL,
+				   &ima_ascii_measurements_ops);
+	if (IS_ERR(ascii_runtime_measurements))
+		goto out;
+
+	runtime_measurements_count =
+	    securityfs_create_file("runtime_measurements_count",
+				   S_IRUSR | S_IRGRP, ima_dir, NULL,
+				   &ima_measurements_count_ops);
+	if (IS_ERR(runtime_measurements_count))
+		goto out;
+
+	violations =
+	    securityfs_create_file("violations", S_IRUSR | S_IRGRP,
+				   ima_dir, NULL, &ima_htable_violations_ops);
+	if (IS_ERR(violations))
+		goto out;
+
+	ima_policy = securityfs_create_file("policy",
+					    S_IWUSR,
+					    ima_dir, NULL,
+					    &ima_measure_policy_ops);
+	if (IS_ERR(ima_policy))
+		goto out;
+
+	return 0;
+out:
+	securityfs_remove(violations);
+	securityfs_remove(runtime_measurements_count);
+	securityfs_remove(ascii_runtime_measurements);
+	securityfs_remove(binary_runtime_measurements);
+	securityfs_remove(ima_dir);
+	securityfs_remove(ima_policy);
+	return -1;
+}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
new file mode 100644
index 0000000..e600cad
--- /dev/null
+++ b/security/integrity/ima/ima_init.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer      <sailer@watson.ibm.com>
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Mimi Zohar         <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_init.c
+ *             initialization and cleanup functions
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <crypto/hash_info.h>
+#include "ima.h"
+
+/* name for boot aggregate entry */
+static const char *boot_aggregate_name = "boot_aggregate";
+int ima_used_chip;
+
+/* Add the boot aggregate to the IMA measurement list and extend
+ * the PCR register.
+ *
+ * Calculate the boot aggregate, a SHA1 hash over TPM registers 0-7
+ * when a TPM chip exists, and all zeroes when it does not.  Add the
+ * boot aggregate measurement to the measurement list and extend the
+ * PCR register.
+ *
+ * If a TPM chip does not exist, indicate the core root of trust is
+ * not hardware based by invalidating the aggregate PCR value.
+ * (The aggregate PCR value is invalidated by adding one value to
+ * the measurement list and extending the aggregate PCR value with
+ * a different value.) Violations add a zero entry to the measurement
+ * list and extend the aggregate PCR value with ff...ff's.
+ */
+static int __init ima_add_boot_aggregate(void)
+{
+	static const char op[] = "add_boot_aggregate";
+	const char *audit_cause = "ENOMEM";
+	struct ima_template_entry *entry;
+	struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
+	struct ima_event_data event_data = {iint, NULL, boot_aggregate_name,
+					    NULL, 0, NULL};
+	int result = -ENOMEM;
+	int violation = 0;
+	struct {
+		struct ima_digest_data hdr;
+		char digest[TPM_DIGEST_SIZE];
+	} hash;
+
+	memset(iint, 0, sizeof(*iint));
+	memset(&hash, 0, sizeof(hash));
+	iint->ima_hash = &hash.hdr;
+	iint->ima_hash->algo = HASH_ALGO_SHA1;
+	iint->ima_hash->length = SHA1_DIGEST_SIZE;
+
+	if (ima_used_chip) {
+		result = ima_calc_boot_aggregate(&hash.hdr);
+		if (result < 0) {
+			audit_cause = "hashing_error";
+			goto err_out;
+		}
+	}
+
+	result = ima_alloc_init_template(&event_data, &entry);
+	if (result < 0) {
+		audit_cause = "alloc_entry";
+		goto err_out;
+	}
+
+	result = ima_store_template(entry, violation, NULL,
+				    boot_aggregate_name);
+	if (result < 0) {
+		ima_free_template_entry(entry);
+		audit_cause = "store_entry";
+		goto err_out;
+	}
+	return 0;
+err_out:
+	integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
+			    audit_cause, result, 0);
+	return result;
+}
+
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void)
+{
+	int unset_flags = ima_policy_flag & IMA_APPRAISE;
+
+	ima_policy_flag &= ~unset_flags;
+	integrity_load_x509(INTEGRITY_KEYRING_IMA, CONFIG_IMA_X509_PATH);
+	ima_policy_flag |= unset_flags;
+}
+#endif
+
+int __init ima_init(void)
+{
+	u8 pcr_i[TPM_DIGEST_SIZE];
+	int rc;
+
+	ima_used_chip = 0;
+	rc = tpm_pcr_read(TPM_ANY_NUM, 0, pcr_i);
+	if (rc == 0)
+		ima_used_chip = 1;
+
+	if (!ima_used_chip)
+		pr_info("No TPM chip found, activating TPM-bypass!\n");
+
+	rc = ima_init_keyring(INTEGRITY_KEYRING_IMA);
+	if (rc)
+		return rc;
+
+	rc = ima_init_crypto();
+	if (rc)
+		return rc;
+	rc = ima_init_template();
+	if (rc != 0)
+		return rc;
+
+	rc = ima_add_boot_aggregate();	/* boot aggregate must be first entry */
+	if (rc != 0)
+		return rc;
+
+	ima_init_policy();
+
+	return ima_fs_init();
+}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
new file mode 100644
index 0000000..c21f09b
--- /dev/null
+++ b/security/integrity/ima/ima_main.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Serge Hallyn <serue@us.ibm.com>
+ * Kylene Hall <kylene@us.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_main.c
+ *	implements the IMA hooks: ima_bprm_check, ima_file_mmap,
+ *	and ima_file_check.
+ */
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/binfmts.h>
+#include <linux/mount.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/ima.h>
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+
+int ima_initialized;
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise = IMA_APPRAISE_ENFORCE;
+#else
+int ima_appraise;
+#endif
+
+int ima_hash_algo = HASH_ALGO_SHA1;
+static int hash_setup_done;
+
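+/*
+ * Select the default IMA hash algorithm from the "ima_hash=" boot
+ * parameter, e.g. "ima_hash=sha256".  The original 'ima' template
+ * supports only md5 and sha1 digests, so other algorithms are ignored
+ * while that template is in use.
+ */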
+static int __init hash_setup(char *str)
+{
+	struct ima_template_desc *template_desc = ima_template_desc_current();
+	int i;
+
+	if (hash_setup_done)
+		return 1;
+
+	if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+		if (strncmp(str, "sha1", 4) == 0)
+			ima_hash_algo = HASH_ALGO_SHA1;
+		else if (strncmp(str, "md5", 3) == 0)
+			ima_hash_algo = HASH_ALGO_MD5;
+		goto out;
+	}
+
+	for (i = 0; i < HASH_ALGO__LAST; i++) {
+		if (strcmp(str, hash_algo_name[i]) == 0) {
+			ima_hash_algo = i;
+			break;
+		}
+	}
+out:
+	hash_setup_done = 1;
+	return 1;
+}
+__setup("ima_hash=", hash_setup);
+
+/*
+ * ima_rdwr_violation_check
+ *
+ * Only invalidate the PCR for measured files:
+ *	- Opening a file for write when already open for read,
+ *	  results in a time of measure, time of use (ToMToU) error.
+ *	- Opening a file for read when already open for write,
+ *	  could result in a file measurement error.
+ *
+ */
+static void ima_rdwr_violation_check(struct file *file,
+				     struct integrity_iint_cache *iint,
+				     int must_measure,
+				     char **pathbuf,
+				     const char **pathname)
+{
+	struct inode *inode = file_inode(file);
+	fmode_t mode = file->f_mode;
+	bool send_tomtou = false, send_writers = false;
+
+	if (mode & FMODE_WRITE) {
+		if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
+			if (!iint)
+				iint = integrity_iint_find(inode);
+			/* IMA_MEASURE is set from reader side */
+			if (iint && (iint->flags & IMA_MEASURE))
+				send_tomtou = true;
+		}
+	} else {
+		if ((atomic_read(&inode->i_writecount) > 0) && must_measure)
+			send_writers = true;
+	}
+
+	if (!send_tomtou && !send_writers)
+		return;
+
+	*pathname = ima_d_path(&file->f_path, pathbuf);
+
+	if (send_tomtou)
+		ima_add_violation(file, *pathname, iint,
+				  "invalid_pcr", "ToMToU");
+	if (send_writers)
+		ima_add_violation(file, *pathname, iint,
+				  "invalid_pcr", "open_writers");
+}
+
+static void ima_check_last_writer(struct integrity_iint_cache *iint,
+				  struct inode *inode, struct file *file)
+{
+	fmode_t mode = file->f_mode;
+
+	if (!(mode & FMODE_WRITE))
+		return;
+
+	mutex_lock(&inode->i_mutex);
+	if (atomic_read(&inode->i_writecount) == 1) {
+		if ((iint->version != inode->i_version) ||
+		    (iint->flags & IMA_NEW_FILE)) {
+			iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
+			if (iint->flags & IMA_APPRAISE)
+				ima_update_xattr(iint, file);
+		}
+	}
+	mutex_unlock(&inode->i_mutex);
+}
+
+/**
+ * ima_file_free - called on __fput()
+ * @file: pointer to file structure being freed
+ *
+ * Flag files that changed, based on i_version
+ */
+void ima_file_free(struct file *file)
+{
+	struct inode *inode = file_inode(file);
+	struct integrity_iint_cache *iint;
+
+	if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+		return;
+
+	iint = integrity_iint_find(inode);
+	if (!iint)
+		return;
+
+	ima_check_last_writer(iint, inode, file);
+}
+
+static int process_measurement(struct file *file, int mask, int function,
+			       int opened)
+{
+	struct inode *inode = file_inode(file);
+	struct integrity_iint_cache *iint = NULL;
+	struct ima_template_desc *template_desc;
+	char *pathbuf = NULL;
+	const char *pathname = NULL;
+	int rc = -ENOMEM, action, must_appraise;
+	struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL;
+	int xattr_len = 0;
+	bool violation_check;
+
+	if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+		return 0;
+
+	/* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action
+	 * bitmask based on the appraise/audit/measurement policy.
+	 * Included is the appraise submask.
+	 */
+	action = ima_get_action(inode, mask, function);
+	violation_check = ((function == FILE_CHECK || function == MMAP_CHECK) &&
+			   (ima_policy_flag & IMA_MEASURE));
+	if (!action && !violation_check)
+		return 0;
+
+	must_appraise = action & IMA_APPRAISE;
+
+	/*  Is the appraise rule hook specific?  */
+	if (action & IMA_FILE_APPRAISE)
+		function = FILE_CHECK;
+
+	mutex_lock(&inode->i_mutex);
+
+	if (action) {
+		iint = integrity_inode_get(inode);
+		if (!iint)
+			goto out;
+	}
+
+	if (violation_check) {
+		ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
+					 &pathbuf, &pathname);
+		if (!action) {
+			rc = 0;
+			goto out_free;
+		}
+	}
+
+	/* Determine if already appraised/measured based on bitmask
+	 * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+	 *  IMA_AUDIT, IMA_AUDITED)
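+	 *
+	 * Each "done" flag sits one bit to the left of its "do" flag, so
+	 * the right shift below clears actions already performed.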
+	 */
+	iint->flags |= action;
+	action &= IMA_DO_MASK;
+	action &= ~((iint->flags & IMA_DONE_MASK) >> 1);
+
+	/* Nothing to do, just return existing appraised status */
+	if (!action) {
+		if (must_appraise)
+			rc = ima_get_cache_status(iint, function);
+		goto out_digsig;
+	}
+
+	template_desc = ima_template_desc_current();
+	if ((action & IMA_APPRAISE_SUBMASK) ||
+		    strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0)
+		xattr_ptr = &xattr_value;
+
+	rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
+	if (rc != 0) {
+		if (file->f_flags & O_DIRECT)
+			rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
+		goto out_digsig;
+	}
+
+	if (!pathname)	/* ima_rdwr_violation possibly pre-fetched */
+		pathname = ima_d_path(&file->f_path, &pathbuf);
+
+	if (action & IMA_MEASURE)
+		ima_store_measurement(iint, file, pathname,
+				      xattr_value, xattr_len);
+	if (action & IMA_APPRAISE_SUBMASK)
+		rc = ima_appraise_measurement(function, iint, file, pathname,
+					      xattr_value, xattr_len, opened);
+	if (action & IMA_AUDIT)
+		ima_audit_measurement(iint, pathname);
+
+out_digsig:
+	if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG))
+		rc = -EACCES;
+	kfree(xattr_value);
+out_free:
+	if (pathbuf)
+		__putname(pathbuf);
+out:
+	mutex_unlock(&inode->i_mutex);
+	if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
+		return -EACCES;
+	return 0;
+}
+
+/**
+ * ima_file_mmap - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured (May be NULL)
+ * @prot: contains the protection that will be applied by the kernel.
+ *
+ * Measure files being mmapped executable based on the ima_must_measure()
+ * policy decision.
+ *
+ * On success return 0.  On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_file_mmap(struct file *file, unsigned long prot)
+{
+	if (file && (prot & PROT_EXEC))
+		return process_measurement(file, MAY_EXEC, MMAP_CHECK, 0);
+	return 0;
+}
+
+/**
+ * ima_bprm_check - based on policy, collect/store measurement.
+ * @bprm: contains the linux_binprm structure
+ *
+ * The OS protects against an executable file, already open for write,
+ * from being executed in deny_write_access() and an executable file,
+ * already open for execute, from being modified in get_write_access().
+ * So we can be certain that what we verify and measure here is actually
+ * what is being executed.
+ *
+ * On success return 0.  On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_bprm_check(struct linux_binprm *bprm)
+{
+	return process_measurement(bprm->file, MAY_EXEC, BPRM_CHECK, 0);
+}
+
+/**
+ * ima_file_check - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured
+ * @mask: contains MAY_READ, MAY_WRITE or MAY_EXEC
+ *
+ * Measure files based on the ima_must_measure() policy decision.
+ *
+ * On success return 0.  On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_file_check(struct file *file, int mask, int opened)
+{
+	return process_measurement(file,
+				   mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+				   FILE_CHECK, opened);
+}
+EXPORT_SYMBOL_GPL(ima_file_check);
+
+/**
+ * ima_module_check - based on policy, collect/store/appraise measurement.
+ * @file: pointer to the file to be measured/appraised
+ *
+ * Measure/appraise kernel modules based on policy.
+ *
+ * On success return 0.  On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_module_check(struct file *file)
+{
+	if (!file) {
+#ifndef CONFIG_MODULE_SIG_FORCE
+		if ((ima_appraise & IMA_APPRAISE_MODULES) &&
+		    (ima_appraise & IMA_APPRAISE_ENFORCE))
+			return -EACCES;	/* INTEGRITY_UNKNOWN */
+#endif
+		return 0;	/* We rely on module signature checking */
+	}
+	return process_measurement(file, MAY_EXEC, MODULE_CHECK, 0);
+}
+
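+/*
+ * ima_fw_from_file - based on policy, measure/appraise firmware.
+ * @file: pointer to the firmware file (may be NULL)
+ *
+ * A NULL @file means the firmware was not read by the kernel itself
+ * (e.g. it was loaded via the usermode helper), so it cannot be
+ * appraised; deny if firmware appraisal is being enforced.
+ */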
+int ima_fw_from_file(struct file *file, char *buf, size_t size)
+{
+	if (!file) {
+		if ((ima_appraise & IMA_APPRAISE_FIRMWARE) &&
+		    (ima_appraise & IMA_APPRAISE_ENFORCE))
+			return -EACCES;	/* INTEGRITY_UNKNOWN */
+		return 0;
+	}
+	return process_measurement(file, MAY_EXEC, FIRMWARE_CHECK, 0);
+}
+
+static int __init init_ima(void)
+{
+	int error;
+
+	hash_setup(CONFIG_IMA_DEFAULT_HASH);
+	error = ima_init();
+	if (!error) {
+		ima_initialized = 1;
+		ima_update_policy_flag();
+	}
+	return error;
+}
+
+late_initcall(init_ima);	/* Start IMA after the TPM is available */
+
+MODULE_DESCRIPTION("Integrity Measurement Architecture");
+MODULE_LICENSE("GPL");
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
new file mode 100644
index 0000000..3997e20
--- /dev/null
+++ b/security/integrity/ima/ima_policy.c
@@ -0,0 +1,807 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * ima_policy.c
+ *	- initialize default measure policy rules
+ *
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/security.h>
+#include <linux/magic.h>
+#include <linux/parser.h>
+#include <linux/slab.h>
+#include <linux/genhd.h>
+
+#include "ima.h"
+
+/* flags definitions */
+#define IMA_FUNC	0x0001
+#define IMA_MASK	0x0002
+#define IMA_FSMAGIC	0x0004
+#define IMA_UID		0x0008
+#define IMA_FOWNER	0x0010
+#define IMA_FSUUID	0x0020
+#define IMA_INMASK	0x0040
+#define IMA_EUID	0x0080
+
+#define UNKNOWN		0
+#define MEASURE		0x0001	/* same as IMA_MEASURE */
+#define DONT_MEASURE	0x0002
+#define APPRAISE	0x0004	/* same as IMA_APPRAISE */
+#define DONT_APPRAISE	0x0008
+#define AUDIT		0x0040
+
+int ima_policy_flag;
+
+#define MAX_LSM_RULES 6
+enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
+	LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
+};
+
+enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
+
+struct ima_rule_entry {
+	struct list_head list;
+	int action;
+	unsigned int flags;
+	enum ima_hooks func;
+	int mask;
+	unsigned long fsmagic;
+	u8 fsuuid[16];
+	kuid_t uid;
+	kuid_t fowner;
+	struct {
+		void *rule;	/* LSM file metadata specific */
+		void *args_p;	/* audit value */
+		int type;	/* audit type */
+	} lsm[MAX_LSM_RULES];
+};
+
+/*
+ * Without LSM specific knowledge, the default policy can only be
+ * written in terms of .action, .func, .mask, .fsmagic, .uid, and .fowner
+ */
+
+/*
+ * The minimum rule set to allow for full TCB coverage.  Measures all files
+ * opened or mmapped for exec and everything read by root.  Dangerous because
+ * normal users can easily run the machine out of memory simply by building
+ * and running executables.
+ */
+static struct ima_rule_entry dont_measure_rules[] = {
+	{.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
+	 .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC}
+};
+
+static struct ima_rule_entry original_measurement_rules[] = {
+	{.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+	 .flags = IMA_FUNC | IMA_MASK},
+	{.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+	 .flags = IMA_FUNC | IMA_MASK},
+	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+	 .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_MASK | IMA_UID},
+	{.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+	{.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_measurement_rules[] = {
+	{.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+	 .flags = IMA_FUNC | IMA_MASK},
+	{.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+	 .flags = IMA_FUNC | IMA_MASK},
+	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+	 .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_INMASK | IMA_EUID},
+	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+	 .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_INMASK | IMA_UID},
+	{.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+	{.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_appraise_rules[] = {
+	{.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+#ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
+	{.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER},
+#else
+	/* force signature */
+	{.action = APPRAISE, .fowner = GLOBAL_ROOT_UID,
+	 .flags = IMA_FOWNER | IMA_DIGSIG_REQUIRED},
+#endif
+};
+
+static LIST_HEAD(ima_default_rules);
+static LIST_HEAD(ima_policy_rules);
+static struct list_head *ima_rules;
+
+static DEFINE_MUTEX(ima_rules_mutex);
+
+static int ima_policy __initdata;
+static int __init default_measure_policy_setup(char *str)
+{
+	if (ima_policy)
+		return 1;
+
+	ima_policy = ORIGINAL_TCB;
+	return 1;
+}
+__setup("ima_tcb", default_measure_policy_setup);
+
+static int __init policy_setup(char *str)
+{
+	if (ima_policy)
+		return 1;
+
+	if (strcmp(str, "tcb") == 0)
+		ima_policy = DEFAULT_TCB;
+
+	return 1;
+}
+__setup("ima_policy=", policy_setup);
+
+static bool ima_use_appraise_tcb __initdata;
+static int __init default_appraise_policy_setup(char *str)
+{
+	ima_use_appraise_tcb = 1;
+	return 1;
+}
+__setup("ima_appraise_tcb", default_appraise_policy_setup);
+
+/*
+ * Although the IMA policy does not change, the LSM policy can be
+ * reloaded, leaving the IMA LSM based rules referring to the old,
+ * stale LSM policy.
+ *
+ * Update the IMA LSM based rules to reflect the reloaded LSM policy.
+ * We assume the rules still exist, and BUG_ON() if they don't.
+ */
+static void ima_lsm_update_rules(void)
+{
+	struct ima_rule_entry *entry, *tmp;
+	int result;
+	int i;
+
+	mutex_lock(&ima_rules_mutex);
+	list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
+		for (i = 0; i < MAX_LSM_RULES; i++) {
+			if (!entry->lsm[i].rule)
+				continue;
+			result = security_filter_rule_init(entry->lsm[i].type,
+							   Audit_equal,
+							   entry->lsm[i].args_p,
+							   &entry->lsm[i].rule);
+			BUG_ON(!entry->lsm[i].rule);
+		}
+	}
+	mutex_unlock(&ima_rules_mutex);
+}
+
+/**
+ * ima_match_rules - determine whether an inode matches the measure rule.
+ * @rule: a pointer to a rule
+ * @inode: a pointer to an inode
+ * @func: LIM hook identifier
+ * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ *
+ * Returns true on rule match, false on failure.
+ */
+static bool ima_match_rules(struct ima_rule_entry *rule,
+			    struct inode *inode, enum ima_hooks func, int mask)
+{
+	struct task_struct *tsk = current;
+	const struct cred *cred = current_cred();
+	int i;
+
+	if ((rule->flags & IMA_FUNC) &&
+	    (rule->func != func && func != POST_SETATTR))
+		return false;
+	if ((rule->flags & IMA_MASK) &&
+	    (rule->mask != mask && func != POST_SETATTR))
+		return false;
+	if ((rule->flags & IMA_INMASK) &&
+	    (!(rule->mask & mask) && func != POST_SETATTR))
+		return false;
+	if ((rule->flags & IMA_FSMAGIC)
+	    && rule->fsmagic != inode->i_sb->s_magic)
+		return false;
+	if ((rule->flags & IMA_FSUUID) &&
+	    memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
+		return false;
+	if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
+		return false;
+	if (rule->flags & IMA_EUID) {
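+		/*
+		 * A process able to switch uids (CAP_SETUID) is matched
+		 * against its euid, suid and real uid; otherwise only the
+		 * euid is checked.
+		 */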
+		if (has_capability_noaudit(current, CAP_SETUID)) {
+			if (!uid_eq(rule->uid, cred->euid)
+			    && !uid_eq(rule->uid, cred->suid)
+			    && !uid_eq(rule->uid, cred->uid))
+				return false;
+		} else if (!uid_eq(rule->uid, cred->euid))
+			return false;
+	}
+
+	if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
+		return false;
+	for (i = 0; i < MAX_LSM_RULES; i++) {
+		int rc = 0;
+		u32 osid, sid;
+		int retried = 0;
+
+		if (!rule->lsm[i].rule)
+			continue;
+retry:
+		switch (i) {
+		case LSM_OBJ_USER:
+		case LSM_OBJ_ROLE:
+		case LSM_OBJ_TYPE:
+			security_inode_getsecid(inode, &osid);
+			rc = security_filter_rule_match(osid,
+							rule->lsm[i].type,
+							Audit_equal,
+							rule->lsm[i].rule,
+							NULL);
+			break;
+		case LSM_SUBJ_USER:
+		case LSM_SUBJ_ROLE:
+		case LSM_SUBJ_TYPE:
+			security_task_getsecid(tsk, &sid);
+			rc = security_filter_rule_match(sid,
+							rule->lsm[i].type,
+							Audit_equal,
+							rule->lsm[i].rule,
+							NULL);
+		default:
+			break;
+		}
+		if ((rc < 0) && (!retried)) {
+			retried = 1;
+			ima_lsm_update_rules();
+			goto retry;
+		}
+		if (!rc)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * In addition to knowing that we need to appraise the file in general,
+ * we need to differentiate between calling hooks, for hook specific rules.
+ */
+static int get_subaction(struct ima_rule_entry *rule, int func)
+{
+	if (!(rule->flags & IMA_FUNC))
+		return IMA_FILE_APPRAISE;
+
+	switch (func) {
+	case MMAP_CHECK:
+		return IMA_MMAP_APPRAISE;
+	case BPRM_CHECK:
+		return IMA_BPRM_APPRAISE;
+	case MODULE_CHECK:
+		return IMA_MODULE_APPRAISE;
+	case FIRMWARE_CHECK:
+		return IMA_FIRMWARE_APPRAISE;
+	case FILE_CHECK:
+	default:
+		return IMA_FILE_APPRAISE;
+	}
+}
+
+/**
+ * ima_match_policy - decision based on LSM and other conditions
+ * @inode: pointer to an inode for which the policy decision is being made
+ * @func: IMA hook identifier
+ * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ *
+ * Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
+ * conditions.
+ *
+ * (There is no need for locking when walking the policy list,
+ * as elements in the list are never deleted, nor does the list
+ * change.)
+ */
+int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+		     int flags)
+{
+	struct ima_rule_entry *entry;
+	int action = 0, actmask = flags | (flags << 1);
+
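+	/*
+	 * Each DONT_xxx flag sits one bit to the left of its xxx flag, so
+	 * actmask tracks both the "do" and "don't" bit for every requested
+	 * action.  Once a rule decides an action either way, both bits are
+	 * cleared and the walk can stop early once none remain.
+	 */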
+	list_for_each_entry(entry, ima_rules, list) {
+
+		if (!(entry->action & actmask))
+			continue;
+
+		if (!ima_match_rules(entry, inode, func, mask))
+			continue;
+
+		action |= entry->flags & IMA_ACTION_FLAGS;
+
+		action |= entry->action & IMA_DO_MASK;
+		if (entry->action & IMA_APPRAISE)
+			action |= get_subaction(entry, func);
+
+		if (entry->action & IMA_DO_MASK)
+			actmask &= ~(entry->action | entry->action << 1);
+		else
+			actmask &= ~(entry->action | entry->action >> 1);
+
+		if (!actmask)
+			break;
+	}
+
+	return action;
+}
+
+/*
+ * Initialize the ima_policy_flag variable based on the currently
+ * loaded policy.  Based on this flag, the decision to short circuit
+ * out of a function or not call the function in the first place
+ * can be made earlier.
+ */
+void ima_update_policy_flag(void)
+{
+	struct ima_rule_entry *entry;
+
+	ima_policy_flag = 0;
+	list_for_each_entry(entry, ima_rules, list) {
+		if (entry->action & IMA_DO_MASK)
+			ima_policy_flag |= entry->action;
+	}
+
+	if (!ima_appraise)
+		ima_policy_flag &= ~IMA_APPRAISE;
+}
+
+/**
+ * ima_init_policy - initialize the default measure rules.
+ *
+ * ima_rules points to either the ima_default_rules or the
+ * new ima_policy_rules.
+ */
+void __init ima_init_policy(void)
+{
+	int i, measure_entries, appraise_entries;
+
+	/* if !ima_policy, set entries = 0 so no default measure rules are loaded */
+	measure_entries = ima_policy ? ARRAY_SIZE(dont_measure_rules) : 0;
+	appraise_entries = ima_use_appraise_tcb ?
+			 ARRAY_SIZE(default_appraise_rules) : 0;
+
+	for (i = 0; i < measure_entries; i++)
+		list_add_tail(&dont_measure_rules[i].list, &ima_default_rules);
+
+	switch (ima_policy) {
+	case ORIGINAL_TCB:
+		for (i = 0; i < ARRAY_SIZE(original_measurement_rules); i++)
+			list_add_tail(&original_measurement_rules[i].list,
+				      &ima_default_rules);
+		break;
+	case DEFAULT_TCB:
+		for (i = 0; i < ARRAY_SIZE(default_measurement_rules); i++)
+			list_add_tail(&default_measurement_rules[i].list,
+				      &ima_default_rules);
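+		/* fall through */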
+	default:
+		break;
+	}
+
+	for (i = 0; i < appraise_entries; i++) {
+		list_add_tail(&default_appraise_rules[i].list,
+			      &ima_default_rules);
+	}
+
+	ima_rules = &ima_default_rules;
+}
+
+/**
+ * ima_update_policy - update default_rules with new measure rules
+ *
+ * Called on file .release to update the default rules with a complete new
+ * policy.  Once updated, the policy is locked, no additional rules can be
+ * added to the policy.
+ */
+void ima_update_policy(void)
+{
+	ima_rules = &ima_policy_rules;
+	ima_update_policy_flag();
+}
+
+enum {
+	Opt_err = -1,
+	Opt_measure = 1, Opt_dont_measure,
+	Opt_appraise, Opt_dont_appraise,
+	Opt_audit,
+	Opt_obj_user, Opt_obj_role, Opt_obj_type,
+	Opt_subj_user, Opt_subj_role, Opt_subj_type,
+	Opt_func, Opt_mask, Opt_fsmagic,
+	Opt_uid, Opt_euid, Opt_fowner,
+	Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
+};
+
+static match_table_t policy_tokens = {
+	{Opt_measure, "measure"},
+	{Opt_dont_measure, "dont_measure"},
+	{Opt_appraise, "appraise"},
+	{Opt_dont_appraise, "dont_appraise"},
+	{Opt_audit, "audit"},
+	{Opt_obj_user, "obj_user=%s"},
+	{Opt_obj_role, "obj_role=%s"},
+	{Opt_obj_type, "obj_type=%s"},
+	{Opt_subj_user, "subj_user=%s"},
+	{Opt_subj_role, "subj_role=%s"},
+	{Opt_subj_type, "subj_type=%s"},
+	{Opt_func, "func=%s"},
+	{Opt_mask, "mask=%s"},
+	{Opt_fsmagic, "fsmagic=%s"},
+	{Opt_fsuuid, "fsuuid=%s"},
+	{Opt_uid, "uid=%s"},
+	{Opt_euid, "euid=%s"},
+	{Opt_fowner, "fowner=%s"},
+	{Opt_appraise_type, "appraise_type=%s"},
+	{Opt_permit_directio, "permit_directio"},
+	{Opt_err, NULL}
+};
+
+static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+			     substring_t *args, int lsm_rule, int audit_type)
+{
+	int result;
+
+	if (entry->lsm[lsm_rule].rule)
+		return -EINVAL;
+
+	entry->lsm[lsm_rule].args_p = match_strdup(args);
+	if (!entry->lsm[lsm_rule].args_p)
+		return -ENOMEM;
+
+	entry->lsm[lsm_rule].type = audit_type;
+	result = security_filter_rule_init(entry->lsm[lsm_rule].type,
+					   Audit_equal,
+					   entry->lsm[lsm_rule].args_p,
+					   &entry->lsm[lsm_rule].rule);
+	if (!entry->lsm[lsm_rule].rule) {
+		kfree(entry->lsm[lsm_rule].args_p);
+		return -EINVAL;
+	}
+
+	return result;
+}
+
+static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
+{
+	audit_log_format(ab, "%s=", key);
+	audit_log_untrustedstring(ab, value);
+	audit_log_format(ab, " ");
+}
+
+static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+{
+	struct audit_buffer *ab;
+	char *from;
+	char *p;
+	int result = 0;
+
+	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);
+
+	entry->uid = INVALID_UID;
+	entry->fowner = INVALID_UID;
+	entry->action = UNKNOWN;
+	while ((p = strsep(&rule, " \t")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		unsigned long lnum;
+
+		if (result < 0)
+			break;
+		if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
+			continue;
+		token = match_token(p, policy_tokens, args);
+		switch (token) {
+		case Opt_measure:
+			ima_log_string(ab, "action", "measure");
+
+			if (entry->action != UNKNOWN)
+				result = -EINVAL;
+
+			entry->action = MEASURE;
+			break;
+		case Opt_dont_measure:
+			ima_log_string(ab, "action", "dont_measure");
+
+			if (entry->action != UNKNOWN)
+				result = -EINVAL;
+
+			entry->action = DONT_MEASURE;
+			break;
+		case Opt_appraise:
+			ima_log_string(ab, "action", "appraise");
+
+			if (entry->action != UNKNOWN)
+				result = -EINVAL;
+
+			entry->action = APPRAISE;
+			break;
+		case Opt_dont_appraise:
+			ima_log_string(ab, "action", "dont_appraise");
+
+			if (entry->action != UNKNOWN)
+				result = -EINVAL;
+
+			entry->action = DONT_APPRAISE;
+			break;
+		case Opt_audit:
+			ima_log_string(ab, "action", "audit");
+
+			if (entry->action != UNKNOWN)
+				result = -EINVAL;
+
+			entry->action = AUDIT;
+			break;
+		case Opt_func:
+			ima_log_string(ab, "func", args[0].from);
+
+			if (entry->func)
+				result = -EINVAL;
+
+			if (strcmp(args[0].from, "FILE_CHECK") == 0)
+				entry->func = FILE_CHECK;
+			/* PATH_CHECK is for backwards compat */
+			else if (strcmp(args[0].from, "PATH_CHECK") == 0)
+				entry->func = FILE_CHECK;
+			else if (strcmp(args[0].from, "MODULE_CHECK") == 0)
+				entry->func = MODULE_CHECK;
+			else if (strcmp(args[0].from, "FIRMWARE_CHECK") == 0)
+				entry->func = FIRMWARE_CHECK;
+			else if ((strcmp(args[0].from, "FILE_MMAP") == 0)
+				|| (strcmp(args[0].from, "MMAP_CHECK") == 0))
+				entry->func = MMAP_CHECK;
+			else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
+				entry->func = BPRM_CHECK;
+			else
+				result = -EINVAL;
+			if (!result)
+				entry->flags |= IMA_FUNC;
+			break;
+		case Opt_mask:
+			ima_log_string(ab, "mask", args[0].from);
+
+			if (entry->mask)
+				result = -EINVAL;
+
+			from = args[0].from;
+			if (*from == '^')
+				from++;
+
+			if ((strcmp(from, "MAY_EXEC")) == 0)
+				entry->mask = MAY_EXEC;
+			else if (strcmp(from, "MAY_WRITE") == 0)
+				entry->mask = MAY_WRITE;
+			else if (strcmp(from, "MAY_READ") == 0)
+				entry->mask = MAY_READ;
+			else if (strcmp(from, "MAY_APPEND") == 0)
+				entry->mask = MAY_APPEND;
+			else
+				result = -EINVAL;
+			if (!result)
+				entry->flags |= (*args[0].from == '^')
+				     ? IMA_INMASK : IMA_MASK;
+			break;
+		case Opt_fsmagic:
+			ima_log_string(ab, "fsmagic", args[0].from);
+
+			if (entry->fsmagic) {
+				result = -EINVAL;
+				break;
+			}
+
+			result = kstrtoul(args[0].from, 16, &entry->fsmagic);
+			if (!result)
+				entry->flags |= IMA_FSMAGIC;
+			break;
+		case Opt_fsuuid:
+			ima_log_string(ab, "fsuuid", args[0].from);
+
+			if (memchr_inv(entry->fsuuid, 0x00,
+				       sizeof(entry->fsuuid))) {
+				result = -EINVAL;
+				break;
+			}
+
+			result = blk_part_pack_uuid(args[0].from,
+						    entry->fsuuid);
+			if (!result)
+				entry->flags |= IMA_FSUUID;
+			break;
+		case Opt_uid:
+			ima_log_string(ab, "uid", args[0].from);
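+			/* fall through */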
+		case Opt_euid:
+			if (token == Opt_euid)
+				ima_log_string(ab, "euid", args[0].from);
+
+			if (uid_valid(entry->uid)) {
+				result = -EINVAL;
+				break;
+			}
+
+			result = kstrtoul(args[0].from, 10, &lnum);
+			if (!result) {
+				entry->uid = make_kuid(current_user_ns(),
+						       (uid_t) lnum);
+				if (!uid_valid(entry->uid) ||
+				    (uid_t)lnum != lnum)
+					result = -EINVAL;
+				else
+					entry->flags |= (token == Opt_uid)
+					    ? IMA_UID : IMA_EUID;
+			}
+			break;
+		case Opt_fowner:
+			ima_log_string(ab, "fowner", args[0].from);
+
+			if (uid_valid(entry->fowner)) {
+				result = -EINVAL;
+				break;
+			}
+
+			result = kstrtoul(args[0].from, 10, &lnum);
+			if (!result) {
+				entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum);
+				if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum))
+					result = -EINVAL;
+				else
+					entry->flags |= IMA_FOWNER;
+			}
+			break;
+		case Opt_obj_user:
+			ima_log_string(ab, "obj_user", args[0].from);
+			result = ima_lsm_rule_init(entry, args,
+						   LSM_OBJ_USER,
+						   AUDIT_OBJ_USER);
+			break;
+		case Opt_obj_role:
+			ima_log_string(ab, "obj_role", args[0].from);
+			result = ima_lsm_rule_init(entry, args,
+						   LSM_OBJ_ROLE,
+						   AUDIT_OBJ_ROLE);
+			break;
+		case Opt_obj_type:
+			ima_log_string(ab, "obj_type", args[0].from);
+			result = ima_lsm_rule_init(entry, args,
+						   LSM_OBJ_TYPE,
+						   AUDIT_OBJ_TYPE);
+			break;
+		case Opt_subj_user:
+			ima_log_string(ab, "subj_user", args[0].from);
+			result = ima_lsm_rule_init(entry, args,
+						   LSM_SUBJ_USER,
+						   AUDIT_SUBJ_USER);
+			break;
+		case Opt_subj_role:
+			ima_log_string(ab, "subj_role", args[0].from);
+			result = ima_lsm_rule_init(entry, args,
+						   LSM_SUBJ_ROLE,
+						   AUDIT_SUBJ_ROLE);
+			break;
+		case Opt_subj_type:
+			ima_log_string(ab, "subj_type", args[0].from);
+			result = ima_lsm_rule_init(entry, args,
+						   LSM_SUBJ_TYPE,
+						   AUDIT_SUBJ_TYPE);
+			break;
+		case Opt_appraise_type:
+			if (entry->action != APPRAISE) {
+				result = -EINVAL;
+				break;
+			}
+
+			ima_log_string(ab, "appraise_type", args[0].from);
+			if ((strcmp(args[0].from, "imasig")) == 0)
+				entry->flags |= IMA_DIGSIG_REQUIRED;
+			else
+				result = -EINVAL;
+			break;
+		case Opt_permit_directio:
+			entry->flags |= IMA_PERMIT_DIRECTIO;
+			break;
+		case Opt_err:
+			ima_log_string(ab, "UNKNOWN", p);
+			result = -EINVAL;
+			break;
+		}
+	}
+	if (!result && (entry->action == UNKNOWN))
+		result = -EINVAL;
+	else if (entry->func == MODULE_CHECK)
+		ima_appraise |= IMA_APPRAISE_MODULES;
+	else if (entry->func == FIRMWARE_CHECK)
+		ima_appraise |= IMA_APPRAISE_FIRMWARE;
+	audit_log_format(ab, "res=%d", !result);
+	audit_log_end(ab);
+	return result;
+}
+
+/**
+ * ima_parse_add_rule - add a rule to ima_policy_rules
+ * @rule: ima measurement policy rule
+ *
+ * Uses a mutex to protect the policy list from multiple concurrent writers.
+ * Returns the length of the rule parsed, or an error code on failure.
+ */
+ssize_t ima_parse_add_rule(char *rule)
+{
+	static const char op[] = "update_policy";
+	char *p;
+	struct ima_rule_entry *entry;
+	ssize_t result, len;
+	int audit_info = 0;
+
+	p = strsep(&rule, "\n");
+	len = strlen(p) + 1;
+	p += strspn(p, " \t");
+
+	if (*p == '#' || *p == '\0')
+		return len;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
+				    NULL, op, "-ENOMEM", -ENOMEM, audit_info);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&entry->list);
+
+	result = ima_parse_rule(p, entry);
+	if (result) {
+		kfree(entry);
+		integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
+				    NULL, op, "invalid-policy", result,
+				    audit_info);
+		return result;
+	}
+
+	mutex_lock(&ima_rules_mutex);
+	list_add_tail(&entry->list, &ima_policy_rules);
+	mutex_unlock(&ima_rules_mutex);
+
+	return len;
+}
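
Assuming securityfs is mounted at its usual location, /sys/kernel/security, a policy is loaded by writing rules, one per line, to the IMA policy file, for example:

	echo "measure func=BPRM_CHECK" > /sys/kernel/security/ima/policy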
+
+/* ima_delete_rules called to cleanup invalid policy */
+void ima_delete_rules(void)
+{
+	struct ima_rule_entry *entry, *tmp;
+	int i;
+
+	mutex_lock(&ima_rules_mutex);
+	list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
+		for (i = 0; i < MAX_LSM_RULES; i++)
+			kfree(entry->lsm[i].args_p);
+
+		list_del(&entry->list);
+		kfree(entry);
+	}
+	mutex_unlock(&ima_rules_mutex);
+}
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
new file mode 100644
index 0000000..552705d
--- /dev/null
+++ b/security/integrity/ima/ima_queue.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Serge Hallyn <serue@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_queue.c
+ *       Implements queues that store template measurements and
+ *       maintains aggregate over the stored measurements
+ *       in the pre-configured TPM PCR (if available).
+ *       The measurement list is append-only. No entry is
+ *       ever removed or changed during the boot-cycle.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include "ima.h"
+
+#define AUDIT_CAUSE_LEN_MAX 32
+
+LIST_HEAD(ima_measurements);	/* list of all measurements */
+
+/* key: inode (before secure-hashing a file) */
+struct ima_h_table ima_htable = {
+	.len = ATOMIC_LONG_INIT(0),
+	.violations = ATOMIC_LONG_INIT(0),
+	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
+};
+
+/* This mutex protects the atomicity of extending the measurement list
+ * and extending the TPM PCR aggregate.  Since tpm_pcr_extend() can take
+ * a long time (and the TPM driver uses a mutex), we can't use a spinlock.
+ */
+static DEFINE_MUTEX(ima_extend_list_mutex);
+
+/* look up the digest value in the hash table and return the entry */
+static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
+{
+	struct ima_queue_entry *qe, *ret = NULL;
+	unsigned int key;
+	int rc;
+
+	key = ima_hash_key(digest_value);
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
+		rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
+		if (rc == 0) {
+			ret = qe;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+/* ima_add_template_entry helper function:
+ * - Add template entry to measurement list and hash table.
+ *
+ * (Called with ima_extend_list_mutex held.)
+ */
+static int ima_add_digest_entry(struct ima_template_entry *entry)
+{
+	struct ima_queue_entry *qe;
+	unsigned int key;
+
+	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
+	if (qe == NULL) {
+		pr_err("OUT OF MEMORY ERROR creating queue entry\n");
+		return -ENOMEM;
+	}
+	qe->entry = entry;
+
+	INIT_LIST_HEAD(&qe->later);
+	list_add_tail_rcu(&qe->later, &ima_measurements);
+
+	atomic_long_inc(&ima_htable.len);
+	key = ima_hash_key(entry->digest);
+	hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+	return 0;
+}
+
+static int ima_pcr_extend(const u8 *hash)
+{
+	int result = 0;
+
+	if (!ima_used_chip)
+		return result;
+
+	result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
+	if (result != 0)
+		pr_err("Error Communicating to TPM chip, result: %d\n", result);
+	return result;
+}
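
Conceptually, the extend operation folds each new measurement into the PCR's running aggregate, PCR_new = SHA1(PCR_old || digest) for the SHA1 PCRs used here, so a single PCR value attests to the entire ordered measurement list; extending with 0xff bytes on a violation (see ima_add_template_entry below) deliberately makes the aggregate unverifiable.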
+
+/* Add template entry to the measurement list and hash table,
+ * and extend the pcr.
+ */
+int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+			   const char *op, struct inode *inode,
+			   const unsigned char *filename)
+{
+	u8 digest[TPM_DIGEST_SIZE];
+	const char *audit_cause = "hash_added";
+	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
+	int audit_info = 1;
+	int result = 0, tpmresult = 0;
+
+	mutex_lock(&ima_extend_list_mutex);
+	if (!violation) {
+		memcpy(digest, entry->digest, sizeof(digest));
+		if (ima_lookup_digest_entry(digest)) {
+			audit_cause = "hash_exists";
+			result = -EEXIST;
+			goto out;
+		}
+	}
+
+	result = ima_add_digest_entry(entry);
+	if (result < 0) {
+		audit_cause = "ENOMEM";
+		audit_info = 0;
+		goto out;
+	}
+
+	if (violation)		/* invalidate pcr */
+		memset(digest, 0xff, sizeof(digest));
+
+	tpmresult = ima_pcr_extend(digest);
+	if (tpmresult != 0) {
+		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
+			 tpmresult);
+		audit_cause = tpm_audit_cause;
+		audit_info = 0;
+	}
+out:
+	mutex_unlock(&ima_extend_list_mutex);
+	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+			    op, audit_cause, result, audit_info);
+	return result;
+}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 0000000..0b7404e
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ *                    TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template.c
+ *      Helpers to manage template descriptors.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+#include "ima_template_lib.h"
+
+static struct ima_template_desc defined_templates[] = {
+	{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
+	{.name = "ima-ng", .fmt = "d-ng|n-ng"},
+	{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+	{.name = "", .fmt = ""},	/* placeholder for a custom format */
+};
+
+static struct ima_template_field supported_fields[] = {
+	{.field_id = "d", .field_init = ima_eventdigest_init,
+	 .field_show = ima_show_template_digest},
+	{.field_id = "n", .field_init = ima_eventname_init,
+	 .field_show = ima_show_template_string},
+	{.field_id = "d-ng", .field_init = ima_eventdigest_ng_init,
+	 .field_show = ima_show_template_digest_ng},
+	{.field_id = "n-ng", .field_init = ima_eventname_ng_init,
+	 .field_show = ima_show_template_string},
+	{.field_id = "sig", .field_init = ima_eventsig_init,
+	 .field_show = ima_show_template_sig},
+};
+
+static struct ima_template_desc *ima_template;
+static struct ima_template_desc *lookup_template_desc(const char *name);
+static int template_desc_init_fields(const char *template_fmt,
+				     struct ima_template_field ***fields,
+				     int *num_fields);
+
+static int __init ima_template_setup(char *str)
+{
+	struct ima_template_desc *template_desc;
+	int template_len = strlen(str);
+
+	if (ima_template)
+		return 1;
+
+	/*
+	 * Verify that a template with the supplied name exists.
+	 * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
+	 */
+	template_desc = lookup_template_desc(str);
+	if (!template_desc) {
+		pr_err("template %s not found, using %s\n",
+		       str, CONFIG_IMA_DEFAULT_TEMPLATE);
+		return 1;
+	}
+
+	/*
+	 * Verify whether the current hash algorithm is supported
+	 * by the 'ima' template.
+	 */
+	if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
+	    ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
+		pr_err("template does not support hash alg\n");
+		return 1;
+	}
+
+	ima_template = template_desc;
+	return 1;
+}
+__setup("ima_template=", ima_template_setup);
+
+static int __init ima_template_fmt_setup(char *str)
+{
+	int num_templates = ARRAY_SIZE(defined_templates);
+
+	if (ima_template)
+		return 1;
+
+	if (template_desc_init_fields(str, NULL, NULL) < 0) {
+		pr_err("format string '%s' not valid, using template %s\n",
+		       str, CONFIG_IMA_DEFAULT_TEMPLATE);
+		return 1;
+	}
+
+	defined_templates[num_templates - 1].fmt = str;
+	ima_template = defined_templates + num_templates - 1;
+	return 1;
+}
+__setup("ima_template_fmt=", ima_template_fmt_setup);
+
+static struct ima_template_desc *lookup_template_desc(const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+		if (strcmp(defined_templates[i].name, name) == 0)
+			return defined_templates + i;
+	}
+
+	return NULL;
+}
+
+static struct ima_template_field *lookup_template_field(const char *field_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
+		if (strncmp(supported_fields[i].field_id, field_id,
+			    IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
+			return &supported_fields[i];
+	return NULL;
+}
+
+static int template_fmt_size(const char *template_fmt)
+{
+	char c;
+	int template_fmt_len = strlen(template_fmt);
+	int i = 0, j = 0;
+
+	while (i < template_fmt_len) {
+		c = template_fmt[i];
+		if (c == '|')
+			j++;
+		i++;
+	}
+
+	return j + 1;
+}
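
For example, the "d-ng|n-ng|sig" format above contains two '|' separators, so template_fmt_size() returns 3.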
+
+static int template_desc_init_fields(const char *template_fmt,
+				     struct ima_template_field ***fields,
+				     int *num_fields)
+{
+	const char *template_fmt_ptr;
+	struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
+	int template_num_fields = template_fmt_size(template_fmt);
+	int i, len;
+
+	if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
+		pr_err("format string '%s' contains too many fields\n",
+		       template_fmt);
+		return -EINVAL;
+	}
+
+	for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields;
+	     i++, template_fmt_ptr += len + 1) {
+		char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1];
+
+		len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr;
+		if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) {
+			pr_err("Invalid field with length %d\n", len);
+			return -EINVAL;
+		}
+
+		memcpy(tmp_field_id, template_fmt_ptr, len);
+		tmp_field_id[len] = '\0';
+		found_fields[i] = lookup_template_field(tmp_field_id);
+		if (!found_fields[i]) {
+			pr_err("field '%s' not found\n", tmp_field_id);
+			return -ENOENT;
+		}
+	}
+
+	if (fields && num_fields) {
+		*fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL);
+		if (*fields == NULL)
+			return -ENOMEM;
+
+		memcpy(*fields, found_fields, i * sizeof(*fields));
+		*num_fields = i;
+	}
+
+	return 0;
+}
+
+struct ima_template_desc *ima_template_desc_current(void)
+{
+	if (!ima_template)
+		ima_template =
+		    lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+	return ima_template;
+}
+
+int __init ima_init_template(void)
+{
+	struct ima_template_desc *template = ima_template_desc_current();
+	int result;
+
+	result = template_desc_init_fields(template->fmt,
+					   &(template->fields),
+					   &(template->num_fields));
+	if (result < 0)
+		pr_err("template %s init failed, result: %d\n",
+		       (strlen(template->name) ?
+		       template->name : template->fmt), result);
+
+	return result;
+}
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 0000000..2934e3d
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ *                    TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.c
+ *      Library of supported template fields.
+ */
+#include <crypto/hash_info.h>
+
+#include "ima_template_lib.h"
+
+static bool ima_template_hash_algo_allowed(u8 algo)
+{
+	if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
+		return true;
+
+	return false;
+}
+
+enum data_formats {
+	DATA_FMT_DIGEST = 0,
+	DATA_FMT_DIGEST_WITH_ALGO,
+	DATA_FMT_STRING,
+	DATA_FMT_HEX
+};
+
+static int ima_write_template_field_data(const void *data, const u32 datalen,
+					 enum data_formats datafmt,
+					 struct ima_field_data *field_data)
+{
+	u8 *buf, *buf_ptr;
+	u32 buflen = datalen;
+
+	if (datafmt == DATA_FMT_STRING)
+		buflen = datalen + 1;
+
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memcpy(buf, data, datalen);
+
+	/*
+	 * Replace all space characters with underscores for event names and
+	 * strings.  This avoids filenames containing spaces, or ending with
+	 * the suffix ' (deleted)', being split into multiple template fields
+	 * when a measurement list is parsed (the space is the delimiter
+	 * character for measurement lists in ASCII format).
+	 */
+	if (datafmt == DATA_FMT_STRING) {
+		for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
+			if (*buf_ptr == ' ')
+				*buf_ptr = '_';
+	}
+
+	field_data->data = buf;
+	field_data->len = buflen;
+	return 0;
+}
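
The same normalization can be reproduced in a few lines of ordinary C (a minimal userspace sketch, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		char name[] = "some file (deleted)";	/* a filename with spaces */
		char *p;

		for (p = name; *p != '\0'; p++)
			if (*p == ' ')
				*p = '_';	/* same substitution as above */

		printf("%s\n", name);	/* prints "some_file_(deleted)" */
		return 0;
	}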
+
+static void ima_show_template_data_ascii(struct seq_file *m,
+					 enum ima_show_type show,
+					 enum data_formats datafmt,
+					 struct ima_field_data *field_data)
+{
+	u8 *buf_ptr = field_data->data;
+	u32 buflen = field_data->len;
+
+	switch (datafmt) {
+	case DATA_FMT_DIGEST_WITH_ALGO:
+		buf_ptr = strnchr(field_data->data, buflen, ':');
+		if (buf_ptr != field_data->data)
+			seq_printf(m, "%s", field_data->data);
+
+		/* skip ':' and '\0' */
+		buf_ptr += 2;
+		buflen -= buf_ptr - field_data->data;
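+		/* fall through */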
+	case DATA_FMT_DIGEST:
+	case DATA_FMT_HEX:
+		if (!buflen)
+			break;
+		ima_print_digest(m, buf_ptr, buflen);
+		break;
+	case DATA_FMT_STRING:
+		seq_printf(m, "%s", buf_ptr);
+		break;
+	default:
+		break;
+	}
+}
+
+static void ima_show_template_data_binary(struct seq_file *m,
+					  enum ima_show_type show,
+					  enum data_formats datafmt,
+					  struct ima_field_data *field_data)
+{
+	u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
+	    strlen(field_data->data) : field_data->len;
+
+	if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
+		ima_putc(m, &len, sizeof(len));
+
+	if (!len)
+		return;
+
+	ima_putc(m, field_data->data, len);
+}
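
In the default binary format each field is thus a native-endian u32 length followed by the raw bytes. A minimal userspace sketch of reading one field back (assuming the reader runs on the machine that produced the list, so no byte-swapping is needed):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Read one length-prefixed field; returns malloc'd data or NULL. */
	static uint8_t *read_field(FILE *f, uint32_t *len)
	{
		uint8_t *data;

		if (fread(len, sizeof(*len), 1, f) != 1)
			return NULL;
		data = malloc(*len ? *len : 1);
		if (data && fread(data, 1, *len, f) != *len) {
			free(data);
			return NULL;
		}
		return data;
	}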
+
+static void ima_show_template_field_data(struct seq_file *m,
+					 enum ima_show_type show,
+					 enum data_formats datafmt,
+					 struct ima_field_data *field_data)
+{
+	switch (show) {
+	case IMA_SHOW_ASCII:
+		ima_show_template_data_ascii(m, show, datafmt, field_data);
+		break;
+	case IMA_SHOW_BINARY:
+	case IMA_SHOW_BINARY_NO_FIELD_LEN:
+	case IMA_SHOW_BINARY_OLD_STRING_FMT:
+		ima_show_template_data_binary(m, show, datafmt, field_data);
+		break;
+	default:
+		break;
+	}
+}
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+			      struct ima_field_data *field_data)
+{
+	ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
+}
+
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+				 struct ima_field_data *field_data)
+{
+	ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
+				     field_data);
+}
+
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+			      struct ima_field_data *field_data)
+{
+	ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
+}
+
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+			   struct ima_field_data *field_data)
+{
+	ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
+				       struct ima_field_data *field_data)
+{
+	/*
+	 * digest formats:
+	 *  - DATA_FMT_DIGEST: digest
+	 *  - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest,
+	 *    where <hash algo> is provided if the hash algorithm is not
+	 *    SHA1 or MD5
+	 */
+	u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 };
+	enum data_formats fmt = DATA_FMT_DIGEST;
+	u32 offset = 0;
+
+	if (hash_algo < HASH_ALGO__LAST) {
+		fmt = DATA_FMT_DIGEST_WITH_ALGO;
+		offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1, "%s",
+				   hash_algo_name[hash_algo]);
+		buffer[offset] = ':';
+		offset += 2;
+	}
+
+	if (digest)
+		memcpy(buffer + offset, digest, digestsize);
+	else
+		/*
+		 * If digest is NULL, the event being recorded is a violation.
+		 * Make room for the digest by increasing the offset by
+		 * IMA_DIGEST_SIZE.
+		 */
+		offset += IMA_DIGEST_SIZE;
+
+	return ima_write_template_field_data(buffer, offset + digestsize,
+					     fmt, field_data);
+}
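
Worked example of the layout documented above: for "sha256" the snprintf() returns 6, buffer[6] is set to ':', buffer[7] keeps the '\0' from the zero-initialized buffer, and the digest bytes are copied starting at offset 8, giving a field of 8 + digestsize bytes.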
+
+/*
+ * This function writes the digest of an event (with size limit).
+ */
+int ima_eventdigest_init(struct ima_event_data *event_data,
+			 struct ima_field_data *field_data)
+{
+	struct {
+		struct ima_digest_data hdr;
+		char digest[IMA_MAX_DIGEST_SIZE];
+	} hash;
+	u8 *cur_digest = NULL;
+	u32 cur_digestsize = 0;
+	struct inode *inode;
+	int result;
+
+	memset(&hash, 0, sizeof(hash));
+
+	if (event_data->violation)	/* recording a violation. */
+		goto out;
+
+	if (ima_template_hash_algo_allowed(event_data->iint->ima_hash->algo)) {
+		cur_digest = event_data->iint->ima_hash->digest;
+		cur_digestsize = event_data->iint->ima_hash->length;
+		goto out;
+	}
+
+	if (!event_data->file)	/* missing info to re-calculate the digest */
+		return -EINVAL;
+
+	inode = file_inode(event_data->file);
+	hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
+	    ima_hash_algo : HASH_ALGO_SHA1;
+	result = ima_calc_file_hash(event_data->file, &hash.hdr);
+	if (result) {
+		integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+				    event_data->filename, "collect_data",
+				    "failed", result, 0);
+		return result;
+	}
+	cur_digest = hash.hdr.digest;
+	cur_digestsize = hash.hdr.length;
+out:
+	return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+					   HASH_ALGO__LAST, field_data);
+}
+
+/*
+ * This function writes the digest of an event (without size limit).
+ */
+int ima_eventdigest_ng_init(struct ima_event_data *event_data,
+			    struct ima_field_data *field_data)
+{
+	u8 *cur_digest = NULL, hash_algo = HASH_ALGO_SHA1;
+	u32 cur_digestsize = 0;
+
+	if (event_data->violation)	/* recording a violation. */
+		goto out;
+
+	cur_digest = event_data->iint->ima_hash->digest;
+	cur_digestsize = event_data->iint->ima_hash->length;
+
+	hash_algo = event_data->iint->ima_hash->algo;
+out:
+	return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+					   hash_algo, field_data);
+}
+
+static int ima_eventname_init_common(struct ima_event_data *event_data,
+				     struct ima_field_data *field_data,
+				     bool size_limit)
+{
+	const char *cur_filename = NULL;
+	u32 cur_filename_len = 0;
+
+	BUG_ON(event_data->filename == NULL && event_data->file == NULL);
+
+	if (event_data->filename) {
+		cur_filename = event_data->filename;
+		cur_filename_len = strlen(event_data->filename);
+
+		if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
+			goto out;
+	}
+
+	if (event_data->file) {
+		cur_filename = event_data->file->f_path.dentry->d_name.name;
+		cur_filename_len = strlen(cur_filename);
+	} else
+		/*
+		 * Truncate the filename if it is too long and the
+		 * file descriptor is not available.
+		 */
+		cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+out:
+	return ima_write_template_field_data(cur_filename, cur_filename_len,
+					     DATA_FMT_STRING, field_data);
+}
+
+/*
+ * This function writes the name of an event (with size limit).
+ */
+int ima_eventname_init(struct ima_event_data *event_data,
+		       struct ima_field_data *field_data)
+{
+	return ima_eventname_init_common(event_data, field_data, true);
+}
+
+/*
+ * This function writes the name of an event (without size limit).
+ */
+int ima_eventname_ng_init(struct ima_event_data *event_data,
+			  struct ima_field_data *field_data)
+{
+	return ima_eventname_init_common(event_data, field_data, false);
+}
+
+/*
+ *  ima_eventsig_init - include the file signature as part of the template data
+ */
+int ima_eventsig_init(struct ima_event_data *event_data,
+		      struct ima_field_data *field_data)
+{
+	enum data_formats fmt = DATA_FMT_HEX;
+	struct evm_ima_xattr_data *xattr_value = event_data->xattr_value;
+	int xattr_len = event_data->xattr_len;
+	int rc = 0;
+
+	if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
+		goto out;
+
+	rc = ima_write_template_field_data(xattr_value, xattr_len, fmt,
+					   field_data);
+out:
+	return rc;
+}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 0000000..c344530
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ *                    TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.h
+ *      Header for the library of supported template fields.
+ */
+#ifndef __LINUX_IMA_TEMPLATE_LIB_H
+#define __LINUX_IMA_TEMPLATE_LIB_H
+
+#include <linux/seq_file.h>
+#include "ima.h"
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+			      struct ima_field_data *field_data);
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+				 struct ima_field_data *field_data);
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+			      struct ima_field_data *field_data);
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+			   struct ima_field_data *field_data);
+int ima_eventdigest_init(struct ima_event_data *event_data,
+			 struct ima_field_data *field_data);
+int ima_eventname_init(struct ima_event_data *event_data,
+		       struct ima_field_data *field_data);
+int ima_eventdigest_ng_init(struct ima_event_data *event_data,
+			    struct ima_field_data *field_data);
+int ima_eventname_ng_init(struct ima_event_data *event_data,
+			  struct ima_field_data *field_data);
+int ima_eventsig_init(struct ima_event_data *event_data,
+		      struct ima_field_data *field_data);
+#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
new file mode 100644
index 0000000..9c61687
--- /dev/null
+++ b/security/integrity/integrity.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2009-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/integrity.h>
+#include <crypto/sha.h>
+#include <linux/key.h>
+
+/* iint action cache flags */
+#define IMA_MEASURE		0x00000001
+#define IMA_MEASURED		0x00000002
+#define IMA_APPRAISE		0x00000004
+#define IMA_APPRAISED		0x00000008
+/*#define IMA_COLLECT		0x00000010  do not use this flag */
+#define IMA_COLLECTED		0x00000020
+#define IMA_AUDIT		0x00000040
+#define IMA_AUDITED		0x00000080
+
+/* iint cache flags */
+#define IMA_ACTION_FLAGS	0xff000000
+#define IMA_DIGSIG		0x01000000
+#define IMA_DIGSIG_REQUIRED	0x02000000
+#define IMA_PERMIT_DIRECTIO	0x04000000
+#define IMA_NEW_FILE		0x08000000
+
+#define IMA_DO_MASK		(IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+				 IMA_APPRAISE_SUBMASK)
+#define IMA_DONE_MASK		(IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
+				 IMA_COLLECTED | IMA_APPRAISED_SUBMASK)
+
+/* iint subaction appraise cache flags */
+#define IMA_FILE_APPRAISE	0x00000100
+#define IMA_FILE_APPRAISED	0x00000200
+#define IMA_MMAP_APPRAISE	0x00000400
+#define IMA_MMAP_APPRAISED	0x00000800
+#define IMA_BPRM_APPRAISE	0x00001000
+#define IMA_BPRM_APPRAISED	0x00002000
+#define IMA_MODULE_APPRAISE	0x00004000
+#define IMA_MODULE_APPRAISED	0x00008000
+#define IMA_FIRMWARE_APPRAISE	0x00010000
+#define IMA_FIRMWARE_APPRAISED	0x00020000
+#define IMA_APPRAISE_SUBMASK	(IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \
+				 IMA_BPRM_APPRAISE | IMA_MODULE_APPRAISE | \
+				 IMA_FIRMWARE_APPRAISE)
+#define IMA_APPRAISED_SUBMASK	(IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
+				 IMA_BPRM_APPRAISED | IMA_MODULE_APPRAISED | \
+				 IMA_FIRMWARE_APPRAISED)
+
+enum evm_ima_xattr_type {
+	IMA_XATTR_DIGEST = 0x01,
+	EVM_XATTR_HMAC,
+	EVM_IMA_XATTR_DIGSIG,
+	IMA_XATTR_DIGEST_NG,
+	IMA_XATTR_LAST
+};
+
+struct evm_ima_xattr_data {
+	u8 type;
+	u8 digest[SHA1_DIGEST_SIZE];
+} __packed;
+
+#define IMA_MAX_DIGEST_SIZE	64
+
+struct ima_digest_data {
+	u8 algo;
+	u8 length;
+	union {
+		struct {
+			u8 unused;
+			u8 type;
+		} sha1;
+		struct {
+			u8 type;
+			u8 algo;
+		} ng;
+		u8 data[2];
+	} xattr;
+	u8 digest[0];
+} __packed;
+
+/*
+ * signature format v2 - for using with asymmetric keys
+ */
+struct signature_v2_hdr {
+	uint8_t type;		/* xattr type */
+	uint8_t version;	/* signature format version */
+	uint8_t	hash_algo;	/* Digest algorithm [enum pkey_hash_algo] */
+	uint32_t keyid;		/* IMA key identifier - not X509/PGP specific */
+	uint16_t sig_size;	/* signature size */
+	uint8_t sig[0];		/* signature payload */
+} __packed;
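
In the security.ima xattr this header immediately follows the one-byte EVM_IMA_XATTR_DIGSIG type, and the multi-byte keyid and sig_size fields are stored big-endian; the signature verification code converts them to host order before use.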
+
+/* integrity data associated with an inode */
+struct integrity_iint_cache {
+	struct rb_node rb_node;	/* rooted in integrity_iint_tree */
+	struct inode *inode;	/* back pointer to inode in question */
+	u64 version;		/* track inode changes */
+	unsigned long flags;
+	enum integrity_status ima_file_status:4;
+	enum integrity_status ima_mmap_status:4;
+	enum integrity_status ima_bprm_status:4;
+	enum integrity_status ima_module_status:4;
+	enum integrity_status ima_firmware_status:4;
+	enum integrity_status evm_status:4;
+	struct ima_digest_data *ima_hash;
+};
+
+/* rbtree calls to look up, insert, and delete
+ * integrity data associated with an inode.
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
+
+int integrity_kernel_read(struct file *file, loff_t offset,
+			  char *addr, unsigned long count);
+int __init integrity_read_file(const char *path, char **data);
+
+#define INTEGRITY_KEYRING_EVM		0
+#define INTEGRITY_KEYRING_MODULE	1
+#define INTEGRITY_KEYRING_IMA		2
+#define INTEGRITY_KEYRING_MAX		3
+
+#ifdef CONFIG_INTEGRITY_SIGNATURE
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+			    const char *digest, int digestlen);
+
+int __init integrity_init_keyring(const unsigned int id);
+int __init integrity_load_x509(const unsigned int id, const char *path);
+#else
+
+static inline int integrity_digsig_verify(const unsigned int id,
+					  const char *sig, int siglen,
+					  const char *digest, int digestlen)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int integrity_init_keyring(const unsigned int id)
+{
+	return 0;
+}
+
+#endif /* CONFIG_INTEGRITY_SIGNATURE */
+
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+int asymmetric_verify(struct key *keyring, const char *sig,
+		      int siglen, const char *data, int datalen);
+#else
+static inline int asymmetric_verify(struct key *keyring, const char *sig,
+				    int siglen, const char *data, int datalen)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void);
+#else
+static inline void ima_load_x509(void)
+{
+}
+#endif
+
+#ifdef CONFIG_INTEGRITY_AUDIT
+/* declarations */
+void integrity_audit_msg(int audit_msgno, struct inode *inode,
+			 const unsigned char *fname, const char *op,
+			 const char *cause, int result, int info);
+#else
+static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
+				       const unsigned char *fname,
+				       const char *op, const char *cause,
+				       int result, int info)
+{
+}
+#endif
diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c
new file mode 100644
index 0000000..90987d1
--- /dev/null
+++ b/security/integrity/integrity_audit.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: integrity_audit.c
+ *	Audit calls for the integrity subsystem
+ */
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/audit.h>
+#include "integrity.h"
+
+static int integrity_audit_info;
+
+/* integrity_audit_setup - enable informational auditing messages */
+static int __init integrity_audit_setup(char *str)
+{
+	unsigned long audit;
+
+	if (!kstrtoul(str, 0, &audit))
+		integrity_audit_info = audit ? 1 : 0;
+	return 1;
+}
+__setup("integrity_audit=", integrity_audit_setup);
+
+void integrity_audit_msg(int audit_msgno, struct inode *inode,
+			 const unsigned char *fname, const char *op,
+			 const char *cause, int result, int audit_info)
+{
+	struct audit_buffer *ab;
+	char name[TASK_COMM_LEN];
+
+	if (!integrity_audit_info && audit_info == 1)	/* Skip info messages */
+		return;
+
+	ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
+	audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
+			 task_pid_nr(current),
+			 from_kuid(&init_user_ns, current_cred()->uid),
+			 from_kuid(&init_user_ns, audit_get_loginuid(current)),
+			 audit_get_sessionid(current));
+	audit_log_task_context(ab);
+	audit_log_format(ab, " op=");
+	audit_log_string(ab, op);
+	audit_log_format(ab, " cause=");
+	audit_log_string(ab, cause);
+	audit_log_format(ab, " comm=");
+	audit_log_untrustedstring(ab, get_task_comm(name, current));
+	if (fname) {
+		audit_log_format(ab, " name=");
+		audit_log_untrustedstring(ab, fname);
+	}
+	if (inode) {
+		audit_log_format(ab, " dev=");
+		audit_log_untrustedstring(ab, inode->i_sb->s_id);
+		audit_log_format(ab, " ino=%lu", inode->i_ino);
+	}
+	audit_log_format(ab, " res=%d", !result);
+	audit_log_end(ab);
+}
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
new file mode 100644
index 0000000..1edb37e
--- /dev/null
+++ b/security/keys/Kconfig
@@ -0,0 +1,86 @@
+#
+# Key management configuration
+#
+
+config KEYS
+	bool "Enable access key retention support"
+	select ASSOCIATIVE_ARRAY
+	help
+	  This option provides support for retaining authentication tokens and
+	  access keys in the kernel.
+
+	  It also includes provision of methods by which such keys might be
+	  associated with a process so that network filesystems, encryption
+	  support and the like can find them.
+
+	  Furthermore, a special type of key is available that acts as a keyring:
+	  a searchable sequence of keys. Each process is equipped with access
+	  to five standard keyrings: UID-specific, GID-specific, session,
+	  process and thread.
+
+	  If you are unsure as to whether this is required, answer N.
+
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
+config PERSISTENT_KEYRINGS
+	bool "Enable register of persistent per-UID keyrings"
+	depends on KEYS
+	help
+	  This option provides a register of persistent per-UID keyrings,
+	  primarily aimed at Kerberos key storage.  The keyrings are persistent
+	  in the sense that they stay around after all processes of that UID
+	  have exited, not that they survive the machine being rebooted.
+
+	  A particular keyring may be accessed by either the user whose keyring
+	  it is or by a process with administrative privileges.  The active
+	  LSMs gets to rule on which admin-level processes get to access the
+	  cache.
+
+	  Keyrings are created and added into the register upon demand and get
+	  removed if they expire (a default timeout is set upon creation).
+
+config BIG_KEYS
+	bool "Large payload keys"
+	depends on KEYS
+	depends on TMPFS
+	help
+	  This option provides support for holding large keys within the kernel
+	  (for example Kerberos ticket caches).  The data may be stored out to
+	  swapspace by tmpfs.
+
+	  If you are unsure as to whether this is required, answer N.
+
+config TRUSTED_KEYS
+	tristate "TRUSTED KEYS"
+	depends on KEYS && TCG_TPM
+	select CRYPTO
+	select CRYPTO_HMAC
+	select CRYPTO_SHA1
+	help
+	  This option provides support for creating, sealing, and unsealing
+	  keys in the kernel. Trusted keys are random number symmetric keys,
+	  generated and RSA-sealed by the TPM. The TPM only unseals the keys
+	  if the boot PCRs and other criteria match.  Userspace will only ever
+	  see encrypted blobs.
+
+	  If you are unsure as to whether this is required, answer N.
+
+config ENCRYPTED_KEYS
+	tristate "ENCRYPTED KEYS"
+	depends on KEYS
+	select CRYPTO
+	select CRYPTO_HMAC
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_SHA256
+	select CRYPTO_RNG
+	help
+	  This option provides support for creating/encrypting/decrypting keys
+	  in the kernel.  Encrypted keys are kernel generated random numbers,
+	  which are encrypted/decrypted with a 'master' symmetric key. The
+	  'master' key can be either a trusted-key or user-key type.
+	  Userspace only ever sees/stores encrypted blobs.
+
+	  If you are unsure as to whether this is required, answer N.
diff --git a/security/keys/Makefile b/security/keys/Makefile
new file mode 100644
index 0000000..dfb3a7b
--- /dev/null
+++ b/security/keys/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for key management
+#
+
+#
+# Core
+#
+obj-y := \
+	gc.o \
+	key.o \
+	keyring.o \
+	keyctl.o \
+	permission.o \
+	process_keys.o \
+	request_key.o \
+	request_key_auth.o \
+	user_defined.o
+obj-$(CONFIG_KEYS_COMPAT) += compat.o
+obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
+
+#
+# Key types
+#
+obj-$(CONFIG_BIG_KEYS) += big_key.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
new file mode 100644
index 0000000..08c4cc5
--- /dev/null
+++ b/security/keys/big_key.c
@@ -0,0 +1,229 @@
+/* Large capacity key type
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/shmem_fs.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/big_key-type.h>
+
+MODULE_LICENSE("GPL");
+
+/*
+ * Layout of key payload words.
+ */
+enum {
+	big_key_data,
+	big_key_path,
+	big_key_path_2nd_part,
+	big_key_len,
+};
+
+/*
+ * If the data is under this limit, there's no point creating a shm file to
+ * hold it as the permanently resident metadata for the shmem fs will be at
+ * least as large as the data.
+ */
+#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
+
+/*
+ * big_key defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_big_key = {
+	.name			= "big_key",
+	.preparse		= big_key_preparse,
+	.free_preparse		= big_key_free_preparse,
+	.instantiate		= generic_key_instantiate,
+	.revoke			= big_key_revoke,
+	.destroy		= big_key_destroy,
+	.describe		= big_key_describe,
+	.read			= big_key_read,
+};
+
+/*
+ * Preparse a big key
+ */
+int big_key_preparse(struct key_preparsed_payload *prep)
+{
+	struct path *path = (struct path *)&prep->payload.data[big_key_path];
+	struct file *file;
+	ssize_t written;
+	size_t datalen = prep->datalen;
+	int ret;
+
+	ret = -EINVAL;
+	if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
+		goto error;
+
+	/* Set an arbitrary quota */
+	prep->quotalen = 16;
+
+	prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;
+
+	if (datalen > BIG_KEY_FILE_THRESHOLD) {
+		/* Create a shmem file to store the data in.  This will permit the data
+		 * to be swapped out if needed.
+		 *
+		 * TODO: Encrypt the stored data with a temporary key.
+		 */
+		file = shmem_kernel_file_setup("", datalen, 0);
+		if (IS_ERR(file)) {
+			ret = PTR_ERR(file);
+			goto error;
+		}
+
+		written = kernel_write(file, prep->data, prep->datalen, 0);
+		if (written != datalen) {
+			ret = written;
+			if (written >= 0)
+				ret = -ENOMEM;
+			goto err_fput;
+		}
+
+		/* Pin the mount and dentry to the key so that we can open it again
+		 * later
+		 */
+		*path = file->f_path;
+		path_get(path);
+		fput(file);
+	} else {
+		/* Just store the data in a buffer */
+		void *data = kmalloc(datalen, GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		prep->payload.data[big_key_data] = data;
+		memcpy(data, prep->data, prep->datalen);
+	}
+	return 0;
+
+err_fput:
+	fput(file);
+error:
+	return ret;
+}
+
+/*
+ * Clear the preparsed payload.
+ */
+void big_key_free_preparse(struct key_preparsed_payload *prep)
+{
+	if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
+		struct path *path = (struct path *)&prep->payload.data[big_key_path];
+		path_put(path);
+	} else {
+		kfree(prep->payload.data[big_key_data]);
+	}
+}
+
+/*
+ * clear the quota and discard the stored data of a revoked big_key key
+ * - called with the key sem write-locked
+ */
+void big_key_revoke(struct key *key)
+{
+	struct path *path = (struct path *)&key->payload.data[big_key_path];
+
+	/* clear the quota */
+	key_payload_reserve(key, 0);
+	if (key_is_positive(key) &&
+	    (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
+		vfs_truncate(path, 0);
+}
+
+/*
+ * dispose of the data dangling from the corpse of a big_key key
+ */
+void big_key_destroy(struct key *key)
+{
+	size_t datalen = (size_t)key->payload.data[big_key_len];
+
+	if (datalen) {
+		struct path *path = (struct path *)&key->payload.data[big_key_path];
+		path_put(path);
+		path->mnt = NULL;
+		path->dentry = NULL;
+	} else {
+		kfree(key->payload.data[big_key_data]);
+		key->payload.data[big_key_data] = NULL;
+	}
+}
+
+/*
+ * describe the big_key key
+ */
+void big_key_describe(const struct key *key, struct seq_file *m)
+{
+	size_t datalen = (size_t)key->payload.data[big_key_len];
+
+	seq_puts(m, key->description);
+
+	if (key_is_positive(key))
+		seq_printf(m, ": %zu [%s]",
+			   datalen,
+			   datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+}
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+{
+	size_t datalen = (size_t)key->payload.data[big_key_len];
+	long ret;
+
+	if (!buffer || buflen < datalen)
+		return datalen;
+
+	if (datalen > BIG_KEY_FILE_THRESHOLD) {
+		struct path *path = (struct path *)&key->payload.data[big_key_path];
+		struct file *file;
+		loff_t pos;
+
+		file = dentry_open(path, O_RDONLY, current_cred());
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+
+		pos = 0;
+		ret = vfs_read(file, buffer, datalen, &pos);
+		fput(file);
+		if (ret >= 0 && ret != datalen)
+			ret = -EIO;
+	} else {
+		ret = datalen;
+		if (copy_to_user(buffer, key->payload.data[big_key_data],
+				 datalen) != 0)
+			ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+/*
+ * Module stuff
+ */
+static int __init big_key_init(void)
+{
+	return register_key_type(&key_type_big_key);
+}
+
+static void __exit big_key_cleanup(void)
+{
+	unregister_key_type(&key_type_big_key);
+}
+
+module_init(big_key_init);
+module_exit(big_key_cleanup);
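
With the keyutils tools installed, the two storage paths can be exercised with something like:

	keyctl add big_key test1 "small payload" @s
	keyctl padd big_key test2 @s < some-large-file

Payloads under BIG_KEY_FILE_THRESHOLD stay in the kmalloc'd buffer and larger ones go to the shmem file, as reflected by the [buff] or [file] tag printed by big_key_describe().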
diff --git a/security/keys/compat.c b/security/keys/compat.c
new file mode 100644
index 0000000..25430a3
--- /dev/null
+++ b/security/keys/compat.c
@@ -0,0 +1,138 @@
+/* 32-bit compatibility syscall for 64-bit systems
+ *
+ * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/keyctl.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Instantiate a key with the specified compatibility multipart payload and
+ * link the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+static long compat_keyctl_instantiate_key_iov(
+	key_serial_t id,
+	const struct compat_iovec __user *_payload_iov,
+	unsigned ioc,
+	key_serial_t ringid)
+{
+	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+	struct iov_iter from;
+	long ret;
+
+	if (!_payload_iov)
+		ioc = 0;
+
+	ret = compat_import_iovec(WRITE, _payload_iov, ioc,
+				  ARRAY_SIZE(iovstack), &iov,
+				  &from);
+	if (ret < 0)
+		return ret;
+
+	ret = keyctl_instantiate_key_common(id, &from, ringid);
+	kfree(iov);
+	return ret;
+}
+
+/*
+ * The key control system call, 32-bit compatibility version for 64-bit archs
+ *
+ * This should only be called if the 64-bit arch uses weird pointers in 32-bit
+ * mode or doesn't guarantee that the top 32-bits of the argument registers on
+ * taking a 32-bit syscall are zero.  If you can, you should call sys_keyctl()
+ * directly.
+ */
+COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
+		       u32, arg2, u32, arg3, u32, arg4, u32, arg5)
+{
+	switch (option) {
+	case KEYCTL_GET_KEYRING_ID:
+		return keyctl_get_keyring_ID(arg2, arg3);
+
+	case KEYCTL_JOIN_SESSION_KEYRING:
+		return keyctl_join_session_keyring(compat_ptr(arg2));
+
+	case KEYCTL_UPDATE:
+		return keyctl_update_key(arg2, compat_ptr(arg3), arg4);
+
+	case KEYCTL_REVOKE:
+		return keyctl_revoke_key(arg2);
+
+	case KEYCTL_DESCRIBE:
+		return keyctl_describe_key(arg2, compat_ptr(arg3), arg4);
+
+	case KEYCTL_CLEAR:
+		return keyctl_keyring_clear(arg2);
+
+	case KEYCTL_LINK:
+		return keyctl_keyring_link(arg2, arg3);
+
+	case KEYCTL_UNLINK:
+		return keyctl_keyring_unlink(arg2, arg3);
+
+	case KEYCTL_SEARCH:
+		return keyctl_keyring_search(arg2, compat_ptr(arg3),
+					     compat_ptr(arg4), arg5);
+
+	case KEYCTL_READ:
+		return keyctl_read_key(arg2, compat_ptr(arg3), arg4);
+
+	case KEYCTL_CHOWN:
+		return keyctl_chown_key(arg2, arg3, arg4);
+
+	case KEYCTL_SETPERM:
+		return keyctl_setperm_key(arg2, arg3);
+
+	case KEYCTL_INSTANTIATE:
+		return keyctl_instantiate_key(arg2, compat_ptr(arg3), arg4,
+					      arg5);
+
+	case KEYCTL_NEGATE:
+		return keyctl_negate_key(arg2, arg3, arg4);
+
+	case KEYCTL_SET_REQKEY_KEYRING:
+		return keyctl_set_reqkey_keyring(arg2);
+
+	case KEYCTL_SET_TIMEOUT:
+		return keyctl_set_timeout(arg2, arg3);
+
+	case KEYCTL_ASSUME_AUTHORITY:
+		return keyctl_assume_authority(arg2);
+
+	case KEYCTL_GET_SECURITY:
+		return keyctl_get_security(arg2, compat_ptr(arg3), arg4);
+
+	case KEYCTL_SESSION_TO_PARENT:
+		return keyctl_session_to_parent();
+
+	case KEYCTL_REJECT:
+		return keyctl_reject_key(arg2, arg3, arg4, arg5);
+
+	case KEYCTL_INSTANTIATE_IOV:
+		return compat_keyctl_instantiate_key_iov(
+			arg2, compat_ptr(arg3), arg4, arg5);
+
+	case KEYCTL_INVALIDATE:
+		return keyctl_invalidate_key(arg2);
+
+	case KEYCTL_GET_PERSISTENT:
+		return keyctl_get_persistent(arg2, arg3);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
new file mode 100644
index 0000000..d6f8433
--- /dev/null
+++ b/security/keys/encrypted-keys/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for encrypted keys
+#
+
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys.o
+
+encrypted-keys-y := encrypted.o ecryptfs_format.o
+masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o
+masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o
+encrypted-keys-y += $(masterkey-y) $(masterkey-m-m)
diff --git a/security/keys/encrypted-keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c
new file mode 100644
index 0000000..6daa3b6
--- /dev/null
+++ b/security/keys/encrypted-keys/ecryptfs_format.c
@@ -0,0 +1,81 @@
+/*
+ * ecryptfs_format.c: helper functions for the encrypted key type
+ *
+ * Copyright (C) 2006 International Business Machines Corp.
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ *                    TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Michael A. Halcrow <mahalcro@us.ibm.com>
+ * Tyler Hicks <tyhicks@ou.edu>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include "ecryptfs_format.h"
+
+u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok)
+{
+	return auth_tok->token.password.session_key_encryption_key;
+}
+EXPORT_SYMBOL(ecryptfs_get_auth_tok_key);
+
+/*
+ * ecryptfs_get_versions()
+ *
+ * Source code taken from the software 'ecryptfs-utils' version 83.
+ *
+ */
+void ecryptfs_get_versions(int *major, int *minor, int *file_version)
+{
+	*major = ECRYPTFS_VERSION_MAJOR;
+	*minor = ECRYPTFS_VERSION_MINOR;
+	if (file_version)
+		*file_version = ECRYPTFS_SUPPORTED_FILE_VERSION;
+}
+EXPORT_SYMBOL(ecryptfs_get_versions);
+
+/*
+ * ecryptfs_fill_auth_tok - fill the ecryptfs_auth_tok structure
+ *
+ * Fill the ecryptfs_auth_tok structure with required ecryptfs data.
+ * The source code is inspired by the original function generate_payload()
+ * shipped with the software 'ecryptfs-utils' version 83.
+ *
+ */
+int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
+			   const char *key_desc)
+{
+	int major, minor;
+
+	ecryptfs_get_versions(&major, &minor, NULL);
+	auth_tok->version = (((uint16_t)(major << 8) & 0xFF00)
+			     | ((uint16_t)minor & 0x00FF));
+	auth_tok->token_type = ECRYPTFS_PASSWORD;
+	strncpy((char *)auth_tok->token.password.signature, key_desc,
+		ECRYPTFS_PASSWORD_SIG_SIZE);
+	auth_tok->token.password.session_key_encryption_key_bytes =
+		ECRYPTFS_MAX_KEY_BYTES;
+	/*
+	 * Removed auth_tok->token.password.salt and
+	 * auth_tok->token.password.session_key_encryption_key
+	 * initialization from the original code
+	 */
+	/* TODO: Make the hash parameterizable via policy */
+	auth_tok->token.password.flags |=
+		ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET;
+	/* The kernel code will encrypt the session key. */
+	auth_tok->session_key.encrypted_key[0] = 0;
+	auth_tok->session_key.encrypted_key_size = 0;
+	/* Default; subject to change by kernel eCryptfs */
+	auth_tok->token.password.hash_algo = PGP_DIGEST_ALGO_SHA512;
+	auth_tok->token.password.flags &= ~(ECRYPTFS_PERSISTENT_PASSWORD);
+	return 0;
+}
+EXPORT_SYMBOL(ecryptfs_fill_auth_tok);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/encrypted-keys/ecryptfs_format.h b/security/keys/encrypted-keys/ecryptfs_format.h
new file mode 100644
index 0000000..40294de
--- /dev/null
+++ b/security/keys/encrypted-keys/ecryptfs_format.h
@@ -0,0 +1,30 @@
+/*
+ * ecryptfs_format.h: helper functions for the encrypted key type
+ *
+ * Copyright (C) 2006 International Business Machines Corp.
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ *                    TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Michael A. Halcrow <mahalcro@us.ibm.com>
+ * Tyler Hicks <tyhicks@ou.edu>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef __KEYS_ECRYPTFS_H
+#define __KEYS_ECRYPTFS_H
+
+#include <linux/ecryptfs.h>
+
+#define PGP_DIGEST_ALGO_SHA512   10
+
+u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok);
+void ecryptfs_get_versions(int *major, int *minor, int *file_version);
+int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
+			   const char *key_desc);
+
+#endif /* __KEYS_ECRYPTFS_H */
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
new file mode 100644
index 0000000..ce295c0
--- /dev/null
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -0,0 +1,1051 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ *                    TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include <linux/key-type.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/ctype.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+
+#include "encrypted.h"
+#include "ecryptfs_format.h"
+
+static const char KEY_TRUSTED_PREFIX[] = "trusted:";
+static const char KEY_USER_PREFIX[] = "user:";
+static const char hash_alg[] = "sha256";
+static const char hmac_alg[] = "hmac(sha256)";
+static const char blkcipher_alg[] = "cbc(aes)";
+static const char key_format_default[] = "default";
+static const char key_format_ecryptfs[] = "ecryptfs";
+static unsigned int ivsize;
+static int blksize;
+
+#define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1)
+#define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1)
+#define KEY_ECRYPTFS_DESC_LEN 16
+#define HASH_SIZE SHA256_DIGEST_SIZE
+#define MAX_DATA_SIZE 4096
+#define MIN_DATA_SIZE  20
+
+struct sdesc {
+	struct shash_desc shash;
+	char ctx[];
+};
+
+static struct crypto_shash *hashalg;
+static struct crypto_shash *hmacalg;
+
+enum {
+	Opt_err = -1, Opt_new, Opt_load, Opt_update
+};
+
+enum {
+	Opt_error = -1, Opt_default, Opt_ecryptfs
+};
+
+static const match_table_t key_format_tokens = {
+	{Opt_default, "default"},
+	{Opt_ecryptfs, "ecryptfs"},
+	{Opt_error, NULL}
+};
+
+static const match_table_t key_tokens = {
+	{Opt_new, "new"},
+	{Opt_load, "load"},
+	{Opt_update, "update"},
+	{Opt_err, NULL}
+};
+
+static int aes_get_sizes(void)
+{
+	struct crypto_blkcipher *tfm;
+
+	tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
+		       PTR_ERR(tfm));
+		return PTR_ERR(tfm);
+	}
+	ivsize = crypto_blkcipher_ivsize(tfm);
+	blksize = crypto_blkcipher_blocksize(tfm);
+	crypto_free_blkcipher(tfm);
+	return 0;
+}
+
+/*
+ * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key
+ *
+ * The description of an encrypted key with format 'ecryptfs' must contain
+ * exactly 16 hexadecimal characters.
+ *
+ */
+static int valid_ecryptfs_desc(const char *ecryptfs_desc)
+{
+	int i;
+
+	if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) {
+		pr_err("encrypted_key: key description must be %d hexadecimal "
+		       "characters long\n", KEY_ECRYPTFS_DESC_LEN);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) {
+		if (!isxdigit(ecryptfs_desc[i])) {
+			pr_err("encrypted_key: key description must contain "
+			       "only hexadecimal characters\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key
+ *
+ * key-type:= "trusted:" | "user:"
+ * desc:= master-key description
+ *
+ * Verify that 'key-type' is valid and that 'desc' exists.  On key update,
+ * only the master key description is permitted to change; the key-type
+ * must remain the same.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int valid_master_desc(const char *new_desc, const char *orig_desc)
+{
+	if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
+		if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
+			goto out;
+		if (orig_desc)
+			if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
+				goto out;
+	} else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
+		if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
+			goto out;
+		if (orig_desc)
+			if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
+				goto out;
+	} else
+		goto out;
+	return 0;
+out:
+	return -EINVAL;
+}
+
+/*
+ * datablob_parse - parse the keyctl data
+ *
+ * datablob format:
+ * new [<format>] <master-key name> <decrypted data length>
+ * load [<format>] <master-key name> <decrypted data length>
+ *     <encrypted iv + data>
+ * update <new-master-key name>
+ *
+ * Tokenizes a copy of the keyctl data, returning a pointer to each
+ * NUL-terminated token.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int datablob_parse(char *datablob, const char **format,
+			  char **master_desc, char **decrypted_datalen,
+			  char **hex_encoded_iv)
+{
+	substring_t args[MAX_OPT_ARGS];
+	int ret = -EINVAL;
+	int key_cmd;
+	int key_format;
+	char *p, *keyword;
+
+	keyword = strsep(&datablob, " \t");
+	if (!keyword) {
+		pr_info("encrypted_key: insufficient parameters specified\n");
+		return ret;
+	}
+	key_cmd = match_token(keyword, key_tokens, args);
+
+	/* Get optional format: default | ecryptfs */
+	p = strsep(&datablob, " \t");
+	if (!p) {
+		pr_err("encrypted_key: insufficient parameters specified\n");
+		return ret;
+	}
+
+	key_format = match_token(p, key_format_tokens, args);
+	switch (key_format) {
+	case Opt_ecryptfs:
+	case Opt_default:
+		*format = p;
+		*master_desc = strsep(&datablob, " \t");
+		break;
+	case Opt_error:
+		*master_desc = p;
+		break;
+	}
+
+	if (!*master_desc) {
+		pr_info("encrypted_key: master key parameter is missing\n");
+		goto out;
+	}
+
+	if (valid_master_desc(*master_desc, NULL) < 0) {
+		pr_info("encrypted_key: master key parameter \'%s\' "
+			"is invalid\n", *master_desc);
+		goto out;
+	}
+
+	if (decrypted_datalen) {
+		*decrypted_datalen = strsep(&datablob, " \t");
+		if (!*decrypted_datalen) {
+			pr_info("encrypted_key: keylen parameter is missing\n");
+			goto out;
+		}
+	}
+
+	switch (key_cmd) {
+	case Opt_new:
+		if (!decrypted_datalen) {
+			pr_info("encrypted_key: keyword \'%s\' not allowed "
+				"when called from .update method\n", keyword);
+			break;
+		}
+		ret = 0;
+		break;
+	case Opt_load:
+		if (!decrypted_datalen) {
+			pr_info("encrypted_key: keyword \'%s\' not allowed "
+				"when called from .update method\n", keyword);
+			break;
+		}
+		*hex_encoded_iv = strsep(&datablob, " \t");
+		if (!*hex_encoded_iv) {
+			pr_info("encrypted_key: hex blob is missing\n");
+			break;
+		}
+		ret = 0;
+		break;
+	case Opt_update:
+		if (decrypted_datalen) {
+			pr_info("encrypted_key: keyword \'%s\' not allowed "
+				"when called from .instantiate method\n",
+				keyword);
+			break;
+		}
+		ret = 0;
+		break;
+	case Opt_err:
+		pr_info("encrypted_key: keyword \'%s\' not recognized\n",
+			keyword);
+		break;
+	}
+out:
+	return ret;
+}
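+
+/*
+ * Example payloads accepted by datablob_parse() (illustrative names and
+ * sizes; see Documentation/security/keys-trusted-encrypted.txt):
+ *
+ *   "new trusted:kmk 32"
+ *   "new ecryptfs user:kmk 64"
+ *   "load user:kmk 32 <hex encoded iv + encrypted data + HMAC>"
+ *   "update user:kmk-new"          (via the .update method only)
+ */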
+
+/*
+ * datablob_format - format as an ascii string, before copying to userspace
+ */
+static char *datablob_format(struct encrypted_key_payload *epayload,
+			     size_t asciiblob_len)
+{
+	char *ascii_buf, *bufp;
+	u8 *iv = epayload->iv;
+	int len;
+	int i;
+
+	ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL);
+	if (!ascii_buf)
+		goto out;
+
+	ascii_buf[asciiblob_len] = '\0';
+
+	/* copy datablob master_desc and datalen strings */
+	len = sprintf(ascii_buf, "%s %s %s ", epayload->format,
+		      epayload->master_desc, epayload->datalen);
+
+	/* convert the hex encoded iv, encrypted-data and HMAC to ascii */
+	bufp = &ascii_buf[len];
+	for (i = 0; i < (asciiblob_len - len) / 2; i++)
+		bufp = hex_byte_pack(bufp, iv[i]);
+out:
+	return ascii_buf;
+}
+
+/*
+ * request_user_key - request the user key
+ *
+ * Use a user provided key to encrypt/decrypt an encrypted-key.
+ */
+static struct key *request_user_key(const char *master_desc, const u8 **master_key,
+				    size_t *master_keylen)
+{
+	const struct user_key_payload *upayload;
+	struct key *ukey;
+
+	ukey = request_key(&key_type_user, master_desc, NULL);
+	if (IS_ERR(ukey))
+		goto error;
+
+	down_read(&ukey->sem);
+	upayload = user_key_payload(ukey);
+	if (!upayload) {
+		/* key was revoked before we acquired its semaphore */
+		up_read(&ukey->sem);
+		key_put(ukey);
+		ukey = ERR_PTR(-EKEYREVOKED);
+		goto error;
+	}
+	*master_key = upayload->data;
+	*master_keylen = upayload->datalen;
+error:
+	return ukey;
+}
+
+static struct sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+	struct sdesc *sdesc;
+	int size;
+
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+	sdesc = kmalloc(size, GFP_KERNEL);
+	if (!sdesc)
+		return ERR_PTR(-ENOMEM);
+	sdesc->shash.tfm = alg;
+	sdesc->shash.flags = 0x0;
+	return sdesc;
+}
+
+static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+		     const u8 *buf, unsigned int buflen)
+{
+	struct sdesc *sdesc;
+	int ret;
+
+	sdesc = alloc_sdesc(hmacalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("encrypted_key: can't alloc %s\n", hmac_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_setkey(hmacalg, key, keylen);
+	if (!ret)
+		ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
+	kfree(sdesc);
+	return ret;
+}
+
+static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen)
+{
+	struct sdesc *sdesc;
+	int ret;
+
+	sdesc = alloc_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("encrypted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
+	kfree(sdesc);
+	return ret;
+}
+
+enum derived_key_type { ENC_KEY, AUTH_KEY };
+
+/* Derive authentication/encryption key from trusted key */
+static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
+			   const u8 *master_key, size_t master_keylen)
+{
+	u8 *derived_buf;
+	unsigned int derived_buf_len;
+	int ret;
+
+	derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen;
+	if (derived_buf_len < HASH_SIZE)
+		derived_buf_len = HASH_SIZE;
+
+	derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
+	if (!derived_buf) {
+		pr_err("encrypted_key: out of memory\n");
+		return -ENOMEM;
+	}
+	if (key_type)
+		strcpy(derived_buf, "AUTH_KEY");
+	else
+		strcpy(derived_buf, "ENC_KEY");
+
+	memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
+	       master_keylen);
+	ret = calc_hash(derived_key, derived_buf, derived_buf_len);
+	kfree(derived_buf);
+	return ret;
+}
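+
+/*
+ * Conceptually (a simplified sketch of the derivation above):
+ *
+ *   ENC_KEY:  derived_key = SHA-256("ENC_KEY\0"  || master_key || pad)
+ *   AUTH_KEY: derived_key = SHA-256("AUTH_KEY\0" || master_key || pad)
+ *
+ * where 'pad' is whatever zero bytes remain in the kzalloc'd buffer;
+ * the entire derived_buf_len buffer is hashed.
+ */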
+
+static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
+			       unsigned int key_len, const u8 *iv,
+			       unsigned int ivsize)
+{
+	int ret;
+
+	desc->tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(desc->tfm)) {
+		pr_err("encrypted_key: failed to load %s transform (%ld)\n",
+		       blkcipher_alg, PTR_ERR(desc->tfm));
+		return PTR_ERR(desc->tfm);
+	}
+	desc->flags = 0;
+
+	ret = crypto_blkcipher_setkey(desc->tfm, key, key_len);
+	if (ret < 0) {
+		pr_err("encrypted_key: failed to setkey (%d)\n", ret);
+		crypto_free_blkcipher(desc->tfm);
+		return ret;
+	}
+	crypto_blkcipher_set_iv(desc->tfm, iv, ivsize);
+	return 0;
+}
+
+static struct key *request_master_key(struct encrypted_key_payload *epayload,
+				      const u8 **master_key, size_t *master_keylen)
+{
+	struct key *mkey = ERR_PTR(-EINVAL);
+
+	if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
+		     KEY_TRUSTED_PREFIX_LEN)) {
+		mkey = request_trusted_key(epayload->master_desc +
+					   KEY_TRUSTED_PREFIX_LEN,
+					   master_key, master_keylen);
+	} else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX,
+			    KEY_USER_PREFIX_LEN)) {
+		mkey = request_user_key(epayload->master_desc +
+					KEY_USER_PREFIX_LEN,
+					master_key, master_keylen);
+	} else
+		goto out;
+
+	if (IS_ERR(mkey)) {
+		int ret = PTR_ERR(mkey);
+
+		if (ret == -ENOTSUPP)
+			pr_info("encrypted_key: key %s not supported",
+				epayload->master_desc);
+		else
+			pr_info("encrypted_key: key %s not found",
+				epayload->master_desc);
+		goto out;
+	}
+
+	dump_master_key(*master_key, *master_keylen);
+out:
+	return mkey;
+}
+
+/* Before returning data to userspace, encrypt decrypted data. */
+static int derived_key_encrypt(struct encrypted_key_payload *epayload,
+			       const u8 *derived_key,
+			       unsigned int derived_keylen)
+{
+	struct scatterlist sg_in[2];
+	struct scatterlist sg_out[1];
+	struct blkcipher_desc desc;
+	unsigned int encrypted_datalen;
+	unsigned int padlen;
+	char pad[16];
+	int ret;
+
+	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+	padlen = encrypted_datalen - epayload->decrypted_datalen;
+
+	ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
+				  epayload->iv, ivsize);
+	if (ret < 0)
+		goto out;
+	dump_decrypted_data(epayload);
+
+	memset(pad, 0, sizeof pad);
+	sg_init_table(sg_in, 2);
+	sg_set_buf(&sg_in[0], epayload->decrypted_data,
+		   epayload->decrypted_datalen);
+	sg_set_buf(&sg_in[1], pad, padlen);
+
+	sg_init_table(sg_out, 1);
+	sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
+
+	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen);
+	crypto_free_blkcipher(desc.tfm);
+	if (ret < 0)
+		pr_err("encrypted_key: failed to encrypt (%d)\n", ret);
+	else
+		dump_encrypted_data(epayload, encrypted_datalen);
+out:
+	return ret;
+}
+
+static int datablob_hmac_append(struct encrypted_key_payload *epayload,
+				const u8 *master_key, size_t master_keylen)
+{
+	u8 derived_key[HASH_SIZE];
+	u8 *digest;
+	int ret;
+
+	ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	digest = epayload->format + epayload->datablob_len;
+	ret = calc_hmac(digest, derived_key, sizeof derived_key,
+			epayload->format, epayload->datablob_len);
+	if (!ret)
+		dump_hmac(NULL, digest, HASH_SIZE);
+out:
+	return ret;
+}
+
+/* verify HMAC before decrypting encrypted key */
+static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
+				const u8 *format, const u8 *master_key,
+				size_t master_keylen)
+{
+	u8 derived_key[HASH_SIZE];
+	u8 digest[HASH_SIZE];
+	int ret;
+	char *p;
+	unsigned short len;
+
+	ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	len = epayload->datablob_len;
+	if (!format) {
+		p = epayload->master_desc;
+		len -= strlen(epayload->format) + 1;
+	} else
+		p = epayload->format;
+
+	ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
+	if (ret < 0)
+		goto out;
+	ret = memcmp(digest, epayload->format + epayload->datablob_len,
+		     sizeof digest);
+	if (ret) {
+		ret = -EINVAL;
+		dump_hmac("datablob",
+			  epayload->format + epayload->datablob_len,
+			  HASH_SIZE);
+		dump_hmac("calc", digest, HASH_SIZE);
+	}
+out:
+	return ret;
+}
+
+static int derived_key_decrypt(struct encrypted_key_payload *epayload,
+			       const u8 *derived_key,
+			       unsigned int derived_keylen)
+{
+	struct scatterlist sg_in[1];
+	struct scatterlist sg_out[2];
+	struct blkcipher_desc desc;
+	unsigned int encrypted_datalen;
+	char pad[16];
+	int ret;
+
+	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+	ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
+				  epayload->iv, ivsize);
+	if (ret < 0)
+		goto out;
+	dump_encrypted_data(epayload, encrypted_datalen);
+
+	memset(pad, 0, sizeof pad);
+	sg_init_table(sg_in, 1);
+	sg_init_table(sg_out, 2);
+	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
+	sg_set_buf(&sg_out[0], epayload->decrypted_data,
+		   epayload->decrypted_datalen);
+	sg_set_buf(&sg_out[1], pad, sizeof pad);
+
+	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, encrypted_datalen);
+	crypto_free_blkcipher(desc.tfm);
+	if (ret < 0)
+		goto out;
+	dump_decrypted_data(epayload);
+out:
+	return ret;
+}
+
+/* Allocate memory for decrypted key and datablob. */
+static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
+							 const char *format,
+							 const char *master_desc,
+							 const char *datalen)
+{
+	struct encrypted_key_payload *epayload = NULL;
+	unsigned short datablob_len;
+	unsigned short decrypted_datalen;
+	unsigned short payload_datalen;
+	unsigned int encrypted_datalen;
+	unsigned int format_len;
+	long dlen;
+	int ret;
+
+	ret = kstrtol(datalen, 10, &dlen);
+	if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE)
+		return ERR_PTR(-EINVAL);
+
+	format_len = (!format) ? strlen(key_format_default) : strlen(format);
+	decrypted_datalen = dlen;
+	payload_datalen = decrypted_datalen;
+	if (format && !strcmp(format, key_format_ecryptfs)) {
+		if (dlen != ECRYPTFS_MAX_KEY_BYTES) {
+			pr_err("encrypted_key: keylen for the ecryptfs format "
+			       "must be equal to %d bytes\n",
+			       ECRYPTFS_MAX_KEY_BYTES);
+			return ERR_PTR(-EINVAL);
+		}
+		decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES;
+		payload_datalen = sizeof(struct ecryptfs_auth_tok);
+	}
+
+	encrypted_datalen = roundup(decrypted_datalen, blksize);
+
+	datablob_len = format_len + 1 + strlen(master_desc) + 1
+	    + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen;
+
+	ret = key_payload_reserve(key, payload_datalen + datablob_len
+				  + HASH_SIZE + 1);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	epayload = kzalloc(sizeof(*epayload) + payload_datalen +
+			   datablob_len + HASH_SIZE + 1, GFP_KERNEL);
+	if (!epayload)
+		return ERR_PTR(-ENOMEM);
+
+	epayload->payload_datalen = payload_datalen;
+	epayload->decrypted_datalen = decrypted_datalen;
+	epayload->datablob_len = datablob_len;
+	return epayload;
+}
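+
+/*
+ * Worked sizing example, assuming cbc(aes) (ivsize = 16, blksize = 16)
+ * and the payload "new user:kmk 32":
+ *
+ *   datablob_len = 7+1 ("default") + 8+1 ("user:kmk") + 2+1 ("32")
+ *                  + 16+1 (iv) + 32 (encrypted data) = 69
+ *
+ * and 32 (payload) + 69 + HASH_SIZE (32) + 1 = 134 bytes are reserved.
+ */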
+
+static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
+				 const char *format, const char *hex_encoded_iv)
+{
+	struct key *mkey;
+	u8 derived_key[HASH_SIZE];
+	const u8 *master_key;
+	u8 *hmac;
+	const char *hex_encoded_data;
+	unsigned int encrypted_datalen;
+	size_t master_keylen;
+	size_t asciilen;
+	int ret;
+
+	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+	asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2;
+	if (strlen(hex_encoded_iv) != asciilen)
+		return -EINVAL;
+
+	hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
+	ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize);
+	if (ret < 0)
+		return -EINVAL;
+	ret = hex2bin(epayload->encrypted_data, hex_encoded_data,
+		      encrypted_datalen);
+	if (ret < 0)
+		return -EINVAL;
+
+	hmac = epayload->format + epayload->datablob_len;
+	ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2),
+		      HASH_SIZE);
+	if (ret < 0)
+		return -EINVAL;
+
+	mkey = request_master_key(epayload, &master_key, &master_keylen);
+	if (IS_ERR(mkey))
+		return PTR_ERR(mkey);
+
+	ret = datablob_hmac_verify(epayload, format, master_key, master_keylen);
+	if (ret < 0) {
+		pr_err("encrypted_key: bad hmac (%d)\n", ret);
+		goto out;
+	}
+
+	ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key);
+	if (ret < 0)
+		pr_err("encrypted_key: failed to decrypt key (%d)\n", ret);
+out:
+	up_read(&mkey->sem);
+	key_put(mkey);
+	return ret;
+}
+
+static void __ekey_init(struct encrypted_key_payload *epayload,
+			const char *format, const char *master_desc,
+			const char *datalen)
+{
+	unsigned int format_len;
+
+	format_len = (!format) ? strlen(key_format_default) : strlen(format);
+	epayload->format = epayload->payload_data + epayload->payload_datalen;
+	epayload->master_desc = epayload->format + format_len + 1;
+	epayload->datalen = epayload->master_desc + strlen(master_desc) + 1;
+	epayload->iv = epayload->datalen + strlen(datalen) + 1;
+	epayload->encrypted_data = epayload->iv + ivsize + 1;
+	epayload->decrypted_data = epayload->payload_data;
+
+	if (!format)
+		memcpy(epayload->format, key_format_default, format_len);
+	else {
+		if (!strcmp(format, key_format_ecryptfs))
+			epayload->decrypted_data =
+				ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data);
+
+		memcpy(epayload->format, format, format_len);
+	}
+
+	memcpy(epayload->master_desc, master_desc, strlen(master_desc));
+	memcpy(epayload->datalen, datalen, strlen(datalen));
+}
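+
+/*
+ * Resulting payload layout (one contiguous allocation):
+ *
+ *   payload_data[payload_datalen] | format\0 | master_desc\0 | datalen\0
+ *   | iv[ivsize + 1] | encrypted_data | HMAC[HASH_SIZE]
+ *
+ * For the ecryptfs format, decrypted_data points into the auth_tok held
+ * in payload_data rather than at payload_data itself.
+ */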
+
+/*
+ * encrypted_init - initialize an encrypted key
+ *
+ * For a new key, use a random number for both the iv and data
+ * itself.  For an old key, decrypt the hex encoded data.
+ */
+static int encrypted_init(struct encrypted_key_payload *epayload,
+			  const char *key_desc, const char *format,
+			  const char *master_desc, const char *datalen,
+			  const char *hex_encoded_iv)
+{
+	int ret = 0;
+
+	if (format && !strcmp(format, key_format_ecryptfs)) {
+		ret = valid_ecryptfs_desc(key_desc);
+		if (ret < 0)
+			return ret;
+
+		ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data,
+				       key_desc);
+	}
+
+	__ekey_init(epayload, format, master_desc, datalen);
+	if (!hex_encoded_iv) {
+		get_random_bytes(epayload->iv, ivsize);
+
+		get_random_bytes(epayload->decrypted_data,
+				 epayload->decrypted_datalen);
+	} else
+		ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv);
+	return ret;
+}
+
+/*
+ * encrypted_instantiate - instantiate an encrypted key
+ *
+ * Decrypt an existing encrypted datablob or create a new encrypted key
+ * based on a kernel random number.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_instantiate(struct key *key,
+				 struct key_preparsed_payload *prep)
+{
+	struct encrypted_key_payload *epayload = NULL;
+	char *datablob = NULL;
+	const char *format = NULL;
+	char *master_desc = NULL;
+	char *decrypted_datalen = NULL;
+	char *hex_encoded_iv = NULL;
+	size_t datalen = prep->datalen;
+	int ret;
+
+	if (datalen <= 0 || datalen > 32767 || !prep->data)
+		return -EINVAL;
+
+	datablob = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!datablob)
+		return -ENOMEM;
+	datablob[datalen] = 0;
+	memcpy(datablob, prep->data, datalen);
+	ret = datablob_parse(datablob, &format, &master_desc,
+			     &decrypted_datalen, &hex_encoded_iv);
+	if (ret < 0)
+		goto out;
+
+	epayload = encrypted_key_alloc(key, format, master_desc,
+				       decrypted_datalen);
+	if (IS_ERR(epayload)) {
+		ret = PTR_ERR(epayload);
+		goto out;
+	}
+	ret = encrypted_init(epayload, key->description, format, master_desc,
+			     decrypted_datalen, hex_encoded_iv);
+	if (ret < 0) {
+		kfree(epayload);
+		goto out;
+	}
+
+	rcu_assign_keypointer(key, epayload);
+out:
+	kfree(datablob);
+	return ret;
+}
+
+static void encrypted_rcu_free(struct rcu_head *rcu)
+{
+	struct encrypted_key_payload *epayload;
+
+	epayload = container_of(rcu, struct encrypted_key_payload, rcu);
+	memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
+	kfree(epayload);
+}
+
+/*
+ * encrypted_update - update the master key description
+ *
+ * Change the master key description for an existing encrypted key.
+ * The next read will return an encrypted datablob using the new
+ * master key description.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+	struct encrypted_key_payload *epayload = key->payload.data[0];
+	struct encrypted_key_payload *new_epayload;
+	char *buf;
+	char *new_master_desc = NULL;
+	const char *format = NULL;
+	size_t datalen = prep->datalen;
+	int ret = 0;
+
+	if (key_is_negative(key))
+		return -ENOKEY;
+	if (datalen <= 0 || datalen > 32767 || !prep->data)
+		return -EINVAL;
+
+	buf = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf[datalen] = 0;
+	memcpy(buf, prep->data, datalen);
+	ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL);
+	if (ret < 0)
+		goto out;
+
+	ret = valid_master_desc(new_master_desc, epayload->master_desc);
+	if (ret < 0)
+		goto out;
+
+	new_epayload = encrypted_key_alloc(key, epayload->format,
+					   new_master_desc, epayload->datalen);
+	if (IS_ERR(new_epayload)) {
+		ret = PTR_ERR(new_epayload);
+		goto out;
+	}
+
+	__ekey_init(new_epayload, epayload->format, new_master_desc,
+		    epayload->datalen);
+
+	memcpy(new_epayload->iv, epayload->iv, ivsize);
+	memcpy(new_epayload->payload_data, epayload->payload_data,
+	       epayload->payload_datalen);
+
+	rcu_assign_keypointer(key, new_epayload);
+	call_rcu(&epayload->rcu, encrypted_rcu_free);
+out:
+	kfree(buf);
+	return ret;
+}
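+
+/*
+ * e.g. re-encrypting an existing key under a new master key from
+ * userspace (illustrative key id and name):
+ *
+ *   keyctl update <key id> "update user:kmk-new"
+ */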
+
+/*
+ * encrypted_read - format and copy the encrypted data to userspace
+ *
+ * The resulting datablob format is:
+ * <format> <master-key name> <decrypted data length>
+ *     <hex encoded iv + encrypted data + HMAC>
+ *
+ * On success, return to userspace the encrypted key datablob size.
+ */
+static long encrypted_read(const struct key *key, char __user *buffer,
+			   size_t buflen)
+{
+	struct encrypted_key_payload *epayload;
+	struct key *mkey;
+	const u8 *master_key;
+	size_t master_keylen;
+	char derived_key[HASH_SIZE];
+	char *ascii_buf;
+	size_t asciiblob_len;
+	int ret;
+
+	epayload = rcu_dereference_key(key);
+
+	/* returns the hex encoded iv, encrypted-data, and hmac as ascii */
+	asciiblob_len = epayload->datablob_len + ivsize + 1
+	    + roundup(epayload->decrypted_datalen, blksize)
+	    + (HASH_SIZE * 2);
+
+	if (!buffer || buflen < asciiblob_len)
+		return asciiblob_len;
+
+	mkey = request_master_key(epayload, &master_key, &master_keylen);
+	if (IS_ERR(mkey))
+		return PTR_ERR(mkey);
+
+	ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	ret = derived_key_encrypt(epayload, derived_key, sizeof derived_key);
+	if (ret < 0)
+		goto out;
+
+	ret = datablob_hmac_append(epayload, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	ascii_buf = datablob_format(epayload, asciiblob_len);
+	if (!ascii_buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	up_read(&mkey->sem);
+	key_put(mkey);
+
+	if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
+		ret = -EFAULT;
+	kfree(ascii_buf);
+
+	return asciiblob_len;
+out:
+	up_read(&mkey->sem);
+	key_put(mkey);
+	return ret;
+}
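+
+/*
+ * A returned datablob looks like (illustrative):
+ *
+ *   default user:kmk 32 <hex encoding of iv, encrypted data and HMAC>
+ */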
+
+/*
+ * encrypted_destroy - before freeing the key, clear the decrypted data
+ *
+ * Before freeing the key, clear the memory containing the decrypted
+ * key data.
+ */
+static void encrypted_destroy(struct key *key)
+{
+	struct encrypted_key_payload *epayload = key->payload.data[0];
+
+	if (!epayload)
+		return;
+
+	memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
+	kfree(key->payload.data[0]);
+}
+
+struct key_type key_type_encrypted = {
+	.name = "encrypted",
+	.instantiate = encrypted_instantiate,
+	.update = encrypted_update,
+	.destroy = encrypted_destroy,
+	.describe = user_describe,
+	.read = encrypted_read,
+};
+EXPORT_SYMBOL_GPL(key_type_encrypted);
+
+static void encrypted_shash_release(void)
+{
+	if (hashalg)
+		crypto_free_shash(hashalg);
+	if (hmacalg)
+		crypto_free_shash(hmacalg);
+}
+
+static int __init encrypted_shash_alloc(void)
+{
+	int ret;
+
+	hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hmacalg)) {
+		pr_info("encrypted_key: could not allocate crypto %s\n",
+			hmac_alg);
+		return PTR_ERR(hmacalg);
+	}
+
+	hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hashalg)) {
+		pr_info("encrypted_key: could not allocate crypto %s\n",
+			hash_alg);
+		ret = PTR_ERR(hashalg);
+		goto hashalg_fail;
+	}
+
+	return 0;
+
+hashalg_fail:
+	crypto_free_shash(hmacalg);
+	return ret;
+}
+
+static int __init init_encrypted(void)
+{
+	int ret;
+
+	ret = encrypted_shash_alloc();
+	if (ret < 0)
+		return ret;
+	ret = aes_get_sizes();
+	if (ret < 0)
+		goto out;
+	ret = register_key_type(&key_type_encrypted);
+	if (ret < 0)
+		goto out;
+	return 0;
+out:
+	encrypted_shash_release();
+	return ret;
+}
+
+static void __exit cleanup_encrypted(void)
+{
+	encrypted_shash_release();
+	unregister_key_type(&key_type_encrypted);
+}
+
+late_initcall(init_encrypted);
+module_exit(cleanup_encrypted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/encrypted-keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
new file mode 100644
index 0000000..47802c0
--- /dev/null
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -0,0 +1,66 @@
+#ifndef __ENCRYPTED_KEY_H
+#define __ENCRYPTED_KEY_H
+
+#define ENCRYPTED_DEBUG 0
+#if defined(CONFIG_TRUSTED_KEYS) || \
+  (defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE))
+extern struct key *request_trusted_key(const char *trusted_desc,
+				       const u8 **master_key, size_t *master_keylen);
+#else
+static inline struct key *request_trusted_key(const char *trusted_desc,
+					      const u8 **master_key,
+					      size_t *master_keylen)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
+
+#if ENCRYPTED_DEBUG
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+	print_hex_dump(KERN_ERR, "master key: ", DUMP_PREFIX_NONE, 32, 1,
+		       master_key, master_keylen, 0);
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+	print_hex_dump(KERN_ERR, "decrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+		       epayload->decrypted_data,
+		       epayload->decrypted_datalen, 0);
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+				       unsigned int encrypted_datalen)
+{
+	print_hex_dump(KERN_ERR, "encrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+		       epayload->encrypted_data, encrypted_datalen, 0);
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+			     unsigned int hmac_size)
+{
+	if (str)
+		pr_info("encrypted_key: %s", str);
+	print_hex_dump(KERN_ERR, "hmac: ", DUMP_PREFIX_NONE, 32, 1, digest,
+		       hmac_size, 0);
+}
+#else
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+				       unsigned int encrypted_datalen)
+{
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+			     unsigned int hmac_size)
+{
+}
+#endif
+#endif
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
new file mode 100644
index 0000000..b5b4812
--- /dev/null
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ *                    TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include "encrypted.h"
+
+/*
+ * request_trusted_key - request the trusted key
+ *
+ * Trusted keys are sealed to PCRs and other metadata.  Although userspace
+ * manages both the trusted and encrypted key types, trusted key data,
+ * unlike encrypted key data, is never visible to userspace in decrypted
+ * form.
+ */
+struct key *request_trusted_key(const char *trusted_desc,
+				const u8 **master_key, size_t *master_keylen)
+{
+	struct trusted_key_payload *tpayload;
+	struct key *tkey;
+
+	tkey = request_key(&key_type_trusted, trusted_desc, NULL);
+	if (IS_ERR(tkey))
+		goto error;
+
+	down_read(&tkey->sem);
+	tpayload = tkey->payload.data[0];
+	*master_key = tpayload->key;
+	*master_keylen = tpayload->key_len;
+error:
+	return tkey;
+}
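+
+/*
+ * The trusted master key is typically created beforehand from userspace,
+ * e.g. (illustrative; see Documentation/security/keys-trusted-encrypted.txt):
+ *
+ *   keyctl add trusted kmk "new 32" @u
+ */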
diff --git a/security/keys/gc.c b/security/keys/gc.c
new file mode 100644
index 0000000..1659094
--- /dev/null
+++ b/security/keys/gc.c
@@ -0,0 +1,360 @@
+/* Key garbage collector
+ *
+ * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <keys/keyring-type.h>
+#include "internal.h"
+
+/*
+ * Delay between key revocation/expiry in seconds
+ */
+unsigned key_gc_delay = 5 * 60;
+
+/*
+ * Reaper for unused keys.
+ */
+static void key_garbage_collector(struct work_struct *work);
+DECLARE_WORK(key_gc_work, key_garbage_collector);
+
+/*
+ * Reaper for links from keyrings to dead keys.
+ */
+static void key_gc_timer_func(unsigned long);
+static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
+
+static time_t key_gc_next_run = LONG_MAX;
+static struct key_type *key_gc_dead_keytype;
+
+static unsigned long key_gc_flags;
+#define KEY_GC_KEY_EXPIRED	0	/* A key expired and needs unlinking */
+#define KEY_GC_REAP_KEYTYPE	1	/* A keytype is being unregistered */
+#define KEY_GC_REAPING_KEYTYPE	2	/* Cleared when keytype reaped */
+
+
+/*
+ * Any key whose type gets unregistered will be re-typed to this if it can't be
+ * immediately unlinked.
+ */
+struct key_type key_type_dead = {
+	.name = ".dead",
+};
+
+/*
+ * Schedule a garbage collection run.
+ * - time precision isn't particularly important
+ */
+void key_schedule_gc(time_t gc_at)
+{
+	unsigned long expires;
+	time_t now = current_kernel_time().tv_sec;
+
+	kenter("%ld", gc_at - now);
+
+	if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
+		kdebug("IMMEDIATE");
+		schedule_work(&key_gc_work);
+	} else if (gc_at < key_gc_next_run) {
+		kdebug("DEFERRED");
+		key_gc_next_run = gc_at;
+		expires = jiffies + (gc_at - now) * HZ;
+		mod_timer(&key_gc_timer, expires);
+	}
+}
+
+/*
+ * Schedule a dead links collection run.
+ */
+void key_schedule_gc_links(void)
+{
+	set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
+	schedule_work(&key_gc_work);
+}
+
+/*
+ * A key's cleanup time was reached after it expired, so we need to get the
+ * reaper to go through a cycle finding expired keys.
+ */
+static void key_gc_timer_func(unsigned long data)
+{
+	kenter("");
+	key_gc_next_run = LONG_MAX;
+	key_schedule_gc_links();
+}
+
+/*
+ * Reap keys of dead type.
+ *
+ * We use three flags to make sure we see three complete cycles of the garbage
+ * collector: the first to mark keys of that type as being dead, the second to
+ * collect dead links and the third to clean up the dead keys.  We have to be
+ * careful as there may already be a cycle in progress.
+ *
+ * The caller must be holding key_types_sem.
+ */
+void key_gc_keytype(struct key_type *ktype)
+{
+	kenter("%s", ktype->name);
+
+	key_gc_dead_keytype = ktype;
+	set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+	smp_mb();
+	set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
+
+	kdebug("schedule");
+	schedule_work(&key_gc_work);
+
+	kdebug("sleep");
+	wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE,
+		    TASK_UNINTERRUPTIBLE);
+
+	key_gc_dead_keytype = NULL;
+	kleave("");
+}
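+
+/*
+ * Sketch of the pass progression driven by the flags above: the
+ * REAPING_DEAD_1 pass marks keys of the dead type and clears their
+ * permissions, the REAPING_DEAD_2 pass reaps keyring links to them
+ * (followed by an RCU sync), and the REAPING_DEAD_3 pass destroys any
+ * still-referenced dead keys and wakes the waiter in key_gc_keytype().
+ */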
+
+/*
+ * Garbage collect a list of unreferenced, detached keys
+ */
+static noinline void key_gc_unused_keys(struct list_head *keys)
+{
+	while (!list_empty(keys)) {
+		struct key *key =
+			list_entry(keys->next, struct key, graveyard_link);
+		short state = key->state;
+
+		list_del(&key->graveyard_link);
+
+		kdebug("- %u", key->serial);
+		key_check(key);
+
+		/* Throw away the key data if the key is instantiated */
+		if (state == KEY_IS_POSITIVE && key->type->destroy)
+			key->type->destroy(key);
+
+		security_key_free(key);
+
+		/* deal with the user's key tracking and quota */
+		if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+			spin_lock(&key->user->lock);
+			key->user->qnkeys--;
+			key->user->qnbytes -= key->quotalen;
+			spin_unlock(&key->user->lock);
+		}
+
+		atomic_dec(&key->user->nkeys);
+		if (state != KEY_IS_UNINSTANTIATED)
+			atomic_dec(&key->user->nikeys);
+
+		key_user_put(key->user);
+
+		kfree(key->description);
+
+#ifdef KEY_DEBUGGING
+		key->magic = KEY_DEBUG_MAGIC_X;
+#endif
+		kmem_cache_free(key_jar, key);
+	}
+}
+
+/*
+ * Garbage collector for unused keys.
+ *
+ * This is done in process context so that we don't have to disable interrupts
+ * all over the place.  key_put() schedules this rather than trying to do the
+ * cleanup itself, which means key_put() doesn't have to sleep.
+ */
+static void key_garbage_collector(struct work_struct *work)
+{
+	static LIST_HEAD(graveyard);
+	static u8 gc_state;		/* Internal persistent state */
+#define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
+#define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
+#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
+#define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
+#define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
+#define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
+#define KEY_GC_FOUND_DEAD_KEY	0x80	/* - We found at least one dead key */
+
+	struct rb_node *cursor;
+	struct key *key;
+	time_t new_timer, limit;
+
+	kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+	limit = current_kernel_time().tv_sec;
+	if (limit > key_gc_delay)
+		limit -= key_gc_delay;
+	else
+		limit = key_gc_delay;
+
+	/* Work out what we're going to be doing in this pass */
+	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+	gc_state <<= 1;
+	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
+
+	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+		gc_state |= KEY_GC_REAPING_DEAD_1;
+	kdebug("new pass %x", gc_state);
+
+	new_timer = LONG_MAX;
+
+	/* As only this function is permitted to remove things from the key
+	 * serial tree, if cursor is non-NULL then it will always point to a
+	 * valid node in the tree - even if the lock got dropped.
+	 */
+	spin_lock(&key_serial_lock);
+	cursor = rb_first(&key_serial_tree);
+
+continue_scanning:
+	while (cursor) {
+		key = rb_entry(cursor, struct key, serial_node);
+		cursor = rb_next(cursor);
+
+		if (atomic_read(&key->usage) == 0)
+			goto found_unreferenced_key;
+
+		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+			if (key->type == key_gc_dead_keytype) {
+				gc_state |= KEY_GC_FOUND_DEAD_KEY;
+				set_bit(KEY_FLAG_DEAD, &key->flags);
+				key->perm = 0;
+				goto skip_dead_key;
+			}
+		}
+
+		if (gc_state & KEY_GC_SET_TIMER) {
+			if (key->expiry > limit && key->expiry < new_timer) {
+				kdebug("will expire %x in %ld",
+				       key_serial(key), key->expiry - limit);
+				new_timer = key->expiry;
+			}
+		}
+
+		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
+			if (key->type == key_gc_dead_keytype)
+				gc_state |= KEY_GC_FOUND_DEAD_KEY;
+
+		if ((gc_state & KEY_GC_REAPING_LINKS) ||
+		    unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+			if (key->type == &key_type_keyring)
+				goto found_keyring;
+		}
+
+		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
+			if (key->type == key_gc_dead_keytype)
+				goto destroy_dead_key;
+
+	skip_dead_key:
+		if (spin_is_contended(&key_serial_lock) || need_resched())
+			goto contended;
+	}
+
+contended:
+	spin_unlock(&key_serial_lock);
+
+maybe_resched:
+	if (cursor) {
+		cond_resched();
+		spin_lock(&key_serial_lock);
+		goto continue_scanning;
+	}
+
+	/* We've completed the pass.  Set the timer if we need to and queue a
+	 * new cycle if necessary.  We keep executing cycles until we find one
+	 * where we didn't reap any keys.
+	 */
+	kdebug("pass complete");
+
+	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
+		new_timer += key_gc_delay;
+		key_schedule_gc(new_timer);
+	}
+
+	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
+	    !list_empty(&graveyard)) {
+		/* Make sure that all pending keyring payload destructions are
+		 * fulfilled and that people aren't now looking at dead or
+		 * dying keys that they don't have a reference upon or a link
+		 * to.
+		 */
+		kdebug("gc sync");
+		synchronize_rcu();
+	}
+
+	if (!list_empty(&graveyard)) {
+		kdebug("gc keys");
+		key_gc_unused_keys(&graveyard);
+	}
+
+	if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
+				 KEY_GC_REAPING_DEAD_2))) {
+		if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
+			/* No remaining dead keys: short circuit the remaining
+			 * keytype reap cycles.
+			 */
+			kdebug("dead short");
+			gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
+			gc_state |= KEY_GC_REAPING_DEAD_3;
+		} else {
+			gc_state |= KEY_GC_REAP_AGAIN;
+		}
+	}
+
+	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
+		kdebug("dead wake");
+		smp_mb();
+		clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+		wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
+	}
+
+	if (gc_state & KEY_GC_REAP_AGAIN)
+		schedule_work(&key_gc_work);
+	kleave(" [end %x]", gc_state);
+	return;
+
+	/* We found an unreferenced key - once we've removed it from the tree,
+	 * we can safely drop the lock.
+	 */
+found_unreferenced_key:
+	kdebug("unrefd key %d", key->serial);
+	rb_erase(&key->serial_node, &key_serial_tree);
+	spin_unlock(&key_serial_lock);
+
+	list_add_tail(&key->graveyard_link, &graveyard);
+	gc_state |= KEY_GC_REAP_AGAIN;
+	goto maybe_resched;
+
+	/* We found a keyring and we need to check the payload for links to
+	 * dead or expired keys.  We don't flag another reap immediately as we
+	 * have to wait for the old payload to be destroyed by RCU before we
+	 * can reap the keys to which it refers.
+	 */
+found_keyring:
+	spin_unlock(&key_serial_lock);
+	keyring_gc(key, limit);
+	goto maybe_resched;
+
+	/* We found a dead key that is still referenced.  Reset its type and
+	 * destroy its payload with its semaphore held.
+	 */
+destroy_dead_key:
+	spin_unlock(&key_serial_lock);
+	kdebug("destroy key %d", key->serial);
+	down_write(&key->sem);
+	key->type = &key_type_dead;
+	if (key_gc_dead_keytype->destroy)
+		key_gc_dead_keytype->destroy(key);
+	memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
+	up_write(&key->sem);
+	goto maybe_resched;
+}
diff --git a/security/keys/internal.h b/security/keys/internal.h
new file mode 100644
index 0000000..51ffb9c
--- /dev/null
+++ b/security/keys/internal.h
@@ -0,0 +1,278 @@
+/* Authentication token and access key management internal defs
+ *
+ * Copyright (C) 2003-5, 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _INTERNAL_H
+#define _INTERNAL_H
+
+#include <linux/sched.h>
+#include <linux/key-type.h>
+#include <linux/task_work.h>
+
+struct iovec;
+
+#ifdef __KDEBUG
+#define kenter(FMT, ...) \
+	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+	printk(KERN_DEBUG "   "FMT"\n", ##__VA_ARGS__)
+#else
+#define kenter(FMT, ...) \
+	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
+
+extern struct key_type key_type_dead;
+extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
+
+/*****************************************************************************/
+/*
+ * Keep track of keys for a user.
+ *
+ * This needs to be separate from user_struct to avoid a refcount-loop
+ * (user_struct pins some keyrings which pin this struct).
+ *
+ * We also keep track of keys under request from userspace for this UID here.
+ */
+struct key_user {
+	struct rb_node		node;
+	struct mutex		cons_lock;	/* construction initiation lock */
+	spinlock_t		lock;
+	atomic_t		usage;		/* for accessing qnkeys & qnbytes */
+	atomic_t		nkeys;		/* number of keys */
+	atomic_t		nikeys;		/* number of instantiated keys */
+	kuid_t			uid;
+	int			qnkeys;		/* number of keys allocated to this user */
+	int			qnbytes;	/* number of bytes allocated to this user */
+};
+
+extern struct rb_root	key_user_tree;
+extern spinlock_t	key_user_lock;
+extern struct key_user	root_key_user;
+
+extern struct key_user *key_user_lookup(kuid_t uid);
+extern void key_user_put(struct key_user *user);
+
+/*
+ * Key quota limits.
+ * - root has its own separate limits from everyone else
+ */
+extern unsigned key_quota_root_maxkeys;
+extern unsigned key_quota_root_maxbytes;
+extern unsigned key_quota_maxkeys;
+extern unsigned key_quota_maxbytes;
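+
+/*
+ * These limits are tunable at runtime via the sysctls under
+ * /proc/sys/kernel/keys/ (maxkeys, maxbytes, root_maxkeys,
+ * root_maxbytes) when CONFIG_SYSCTL is enabled.
+ */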
+
+#define KEYQUOTA_LINK_BYTES	4		/* a link in a keyring is worth 4 bytes */
+
+
+extern struct kmem_cache *key_jar;
+extern struct rb_root key_serial_tree;
+extern spinlock_t key_serial_lock;
+extern struct mutex key_construction_mutex;
+extern wait_queue_head_t request_key_conswq;
+
+
+extern struct key_type *key_type_lookup(const char *type);
+extern void key_type_put(struct key_type *ktype);
+
+extern int __key_link_begin(struct key *keyring,
+			    const struct keyring_index_key *index_key,
+			    struct assoc_array_edit **_edit);
+extern int __key_link_check_live_key(struct key *keyring, struct key *key);
+extern void __key_link(struct key *key, struct assoc_array_edit **_edit);
+extern void __key_link_end(struct key *keyring,
+			   const struct keyring_index_key *index_key,
+			   struct assoc_array_edit *edit);
+
+extern key_ref_t find_key_to_update(key_ref_t keyring_ref,
+				    const struct keyring_index_key *index_key);
+
+extern struct key *keyring_search_instkey(struct key *keyring,
+					  key_serial_t target_id);
+
+extern int iterate_over_keyring(const struct key *keyring,
+				int (*func)(const struct key *key, void *data),
+				void *data);
+
+struct keyring_search_context {
+	struct keyring_index_key index_key;
+	const struct cred	*cred;
+	struct key_match_data	match_data;
+	unsigned		flags;
+#define KEYRING_SEARCH_NO_STATE_CHECK	0x0001	/* Skip state checks */
+#define KEYRING_SEARCH_DO_STATE_CHECK	0x0002	/* Override NO_STATE_CHECK */
+#define KEYRING_SEARCH_NO_UPDATE_TIME	0x0004	/* Don't update times */
+#define KEYRING_SEARCH_NO_CHECK_PERM	0x0008	/* Don't check permissions */
+#define KEYRING_SEARCH_DETECT_TOO_DEEP	0x0010	/* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED	0x0020	/* Ignore expired keys (intention to replace) */
+
+	int (*iterator)(const void *object, void *iterator_data);
+
+	/* Internal stuff */
+	int			skipped_ret;
+	bool			possessed;
+	key_ref_t		result;
+	struct timespec		now;
+};
+
+extern bool key_default_cmp(const struct key *key,
+			    const struct key_match_data *match_data);
+extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+				    struct keyring_search_context *ctx);
+
+extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
+
+extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
+
+extern int install_user_keyrings(void);
+extern int install_thread_keyring_to_cred(struct cred *);
+extern int install_process_keyring_to_cred(struct cred *);
+extern int install_session_keyring_to_cred(struct cred *, struct key *);
+
+extern struct key *request_key_and_link(struct key_type *type,
+					const char *description,
+					const void *callout_info,
+					size_t callout_len,
+					void *aux,
+					struct key *dest_keyring,
+					unsigned long flags);
+
+extern bool lookup_user_key_possessed(const struct key *key,
+				      const struct key_match_data *match_data);
+extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
+				 key_perm_t perm);
+#define KEY_LOOKUP_CREATE	0x01
+#define KEY_LOOKUP_PARTIAL	0x02
+#define KEY_LOOKUP_FOR_UNLINK	0x04
+
+extern long join_session_keyring(const char *name);
+extern void key_change_session_keyring(struct callback_head *twork);
+
+extern struct work_struct key_gc_work;
+extern unsigned key_gc_delay;
+extern void keyring_gc(struct key *keyring, time_t limit);
+extern void key_schedule_gc(time_t gc_at);
+extern void key_schedule_gc_links(void);
+extern void key_gc_keytype(struct key_type *ktype);
+
+extern int key_task_permission(const key_ref_t key_ref,
+			       const struct cred *cred,
+			       key_perm_t perm);
+
+/*
+ * Check to see whether permission is granted to use a key in the desired way.
+ */
+static inline int key_permission(const key_ref_t key_ref, unsigned perm)
+{
+	return key_task_permission(key_ref, current_cred(), perm);
+}
+
+/*
+ * Authorisation record for request_key().
+ */
+struct request_key_auth {
+	struct key		*target_key;
+	struct key		*dest_keyring;
+	const struct cred	*cred;
+	void			*callout_info;
+	size_t			callout_len;
+	pid_t			pid;
+};
+
+extern struct key_type key_type_request_key_auth;
+extern struct key *request_key_auth_new(struct key *target,
+					const void *callout_info,
+					size_t callout_len,
+					struct key *dest_keyring);
+
+extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
+
+/*
+ * Determine whether a key is dead.
+ */
+static inline bool key_is_dead(const struct key *key, time_t limit)
+{
+	return
+		key->flags & ((1 << KEY_FLAG_DEAD) |
+			      (1 << KEY_FLAG_INVALIDATED)) ||
+		(key->expiry > 0 && key->expiry <= limit);
+}
+
+/*
+ * keyctl() functions
+ */
+extern long keyctl_get_keyring_ID(key_serial_t, int);
+extern long keyctl_join_session_keyring(const char __user *);
+extern long keyctl_update_key(key_serial_t, const void __user *, size_t);
+extern long keyctl_revoke_key(key_serial_t);
+extern long keyctl_keyring_clear(key_serial_t);
+extern long keyctl_keyring_link(key_serial_t, key_serial_t);
+extern long keyctl_keyring_unlink(key_serial_t, key_serial_t);
+extern long keyctl_describe_key(key_serial_t, char __user *, size_t);
+extern long keyctl_keyring_search(key_serial_t, const char __user *,
+				  const char __user *, key_serial_t);
+extern long keyctl_read_key(key_serial_t, char __user *, size_t);
+extern long keyctl_chown_key(key_serial_t, uid_t, gid_t);
+extern long keyctl_setperm_key(key_serial_t, key_perm_t);
+extern long keyctl_instantiate_key(key_serial_t, const void __user *,
+				   size_t, key_serial_t);
+extern long keyctl_negate_key(key_serial_t, unsigned, key_serial_t);
+extern long keyctl_set_reqkey_keyring(int);
+extern long keyctl_set_timeout(key_serial_t, unsigned);
+extern long keyctl_assume_authority(key_serial_t);
+extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
+				size_t buflen);
+extern long keyctl_session_to_parent(void);
+extern long keyctl_reject_key(key_serial_t, unsigned, unsigned, key_serial_t);
+extern long keyctl_instantiate_key_iov(key_serial_t,
+				       const struct iovec __user *,
+				       unsigned, key_serial_t);
+extern long keyctl_invalidate_key(key_serial_t);
+
+struct iov_iter;
+extern long keyctl_instantiate_key_common(key_serial_t,
+					  struct iov_iter *,
+					  key_serial_t);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+extern long keyctl_get_persistent(uid_t, key_serial_t);
+extern unsigned persistent_keyring_expiry;
+#else
+static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+/*
+ * Debugging key validation
+ */
+#ifdef KEY_DEBUGGING
+extern void __key_check(const struct key *);
+
+static inline void key_check(const struct key *key)
+{
+	if (key && (IS_ERR(key) || key->magic != KEY_DEBUG_MAGIC))
+		__key_check(key);
+}
+
+#else
+
+#define key_check(key) do {} while(0)
+
+#endif
+
+#endif /* _INTERNAL_H */
diff --git a/security/keys/key.c b/security/keys/key.c
new file mode 100644
index 0000000..4d971bf
--- /dev/null
+++ b/security/keys/key.c
@@ -0,0 +1,1160 @@
+/* Basic authentication token and access key management
+ *
+ * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/workqueue.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include "internal.h"
+
+struct kmem_cache *key_jar;
+struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
+DEFINE_SPINLOCK(key_serial_lock);
+
+struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
+DEFINE_SPINLOCK(key_user_lock);
+
+unsigned int key_quota_root_maxkeys = 1000000;	/* root's key count quota */
+unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
+unsigned int key_quota_maxkeys = 200;		/* general key count quota */
+unsigned int key_quota_maxbytes = 20000;	/* general key space quota */
+
+static LIST_HEAD(key_types_list);
+static DECLARE_RWSEM(key_types_sem);
+
+/* We serialise key instantiation and link */
+DEFINE_MUTEX(key_construction_mutex);
+
+#ifdef KEY_DEBUGGING
+void __key_check(const struct key *key)
+{
+	printk("__key_check: key %p {%08x} should be {%08x}\n",
+	       key, key->magic, KEY_DEBUG_MAGIC);
+	BUG();
+}
+#endif
+
+/*
+ * Get the key quota record for a user, allocating a new record if one doesn't
+ * already exist.
+ */
+struct key_user *key_user_lookup(kuid_t uid)
+{
+	struct key_user *candidate = NULL, *user;
+	struct rb_node *parent = NULL;
+	struct rb_node **p;
+
+try_again:
+	p = &key_user_tree.rb_node;
+	spin_lock(&key_user_lock);
+
+	/* search the tree for a user record with a matching UID */
+	while (*p) {
+		parent = *p;
+		user = rb_entry(parent, struct key_user, node);
+
+		if (uid_lt(uid, user->uid))
+			p = &(*p)->rb_left;
+		else if (uid_gt(uid, user->uid))
+			p = &(*p)->rb_right;
+		else
+			goto found;
+	}
+
+	/* if we get here, we failed to find a match in the tree */
+	if (!candidate) {
+		/* allocate a candidate user record if we don't already have
+		 * one */
+		spin_unlock(&key_user_lock);
+
+		user = NULL;
+		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
+		if (unlikely(!candidate))
+			goto out;
+
+		/* the allocation may have scheduled, so we need to repeat the
+		 * search lest someone else added the record whilst we were
+		 * asleep */
+		goto try_again;
+	}
+
+	/* if we get here, then the user record still hadn't appeared on the
+	 * second pass - so we use the candidate record */
+	atomic_set(&candidate->usage, 1);
+	atomic_set(&candidate->nkeys, 0);
+	atomic_set(&candidate->nikeys, 0);
+	candidate->uid = uid;
+	candidate->qnkeys = 0;
+	candidate->qnbytes = 0;
+	spin_lock_init(&candidate->lock);
+	mutex_init(&candidate->cons_lock);
+
+	rb_link_node(&candidate->node, parent, p);
+	rb_insert_color(&candidate->node, &key_user_tree);
+	spin_unlock(&key_user_lock);
+	user = candidate;
+	goto out;
+
+	/* okay - we found a user record for this UID */
+found:
+	atomic_inc(&user->usage);
+	spin_unlock(&key_user_lock);
+	kfree(candidate);
+out:
+	return user;
+}
+
+/*
+ * Dispose of a user structure
+ */
+void key_user_put(struct key_user *user)
+{
+	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
+		rb_erase(&user->node, &key_user_tree);
+		spin_unlock(&key_user_lock);
+
+		kfree(user);
+	}
+}
+
+/*
+ * Allocate a serial number for a key.  These are assigned randomly to avoid
+ * security issues through covert channel problems.
+ */
+static inline void key_alloc_serial(struct key *key)
+{
+	struct rb_node *parent, **p;
+	struct key *xkey;
+
+	/* propose a random serial number and look for a hole for it in the
+	 * serial number tree */
+	do {
+		get_random_bytes(&key->serial, sizeof(key->serial));
+
+		key->serial >>= 1; /* negative numbers are not permitted */
+	} while (key->serial < 3);
+
+	spin_lock(&key_serial_lock);
+
+attempt_insertion:
+	parent = NULL;
+	p = &key_serial_tree.rb_node;
+
+	while (*p) {
+		parent = *p;
+		xkey = rb_entry(parent, struct key, serial_node);
+
+		if (key->serial < xkey->serial)
+			p = &(*p)->rb_left;
+		else if (key->serial > xkey->serial)
+			p = &(*p)->rb_right;
+		else
+			goto serial_exists;
+	}
+
+	/* we've found a suitable hole - arrange for this key to occupy it */
+	rb_link_node(&key->serial_node, parent, p);
+	rb_insert_color(&key->serial_node, &key_serial_tree);
+
+	spin_unlock(&key_serial_lock);
+	return;
+
+	/* we found a key with the proposed serial number - walk the tree from
+	 * that point looking for the next unused serial number */
+serial_exists:
+	for (;;) {
+		key->serial++;
+		if (key->serial < 3) {
+			key->serial = 3;
+			goto attempt_insertion;
+		}
+
+		parent = rb_next(parent);
+		if (!parent)
+			goto attempt_insertion;
+
+		xkey = rb_entry(parent, struct key, serial_node);
+		if (key->serial < xkey->serial)
+			goto attempt_insertion;
+	}
+}
+
+/**
+ * key_alloc - Allocate a key of the specified type.
+ * @type: The type of key to allocate.
+ * @desc: The key description to allow the key to be searched out.
+ * @uid: The owner of the new key.
+ * @gid: The group ID for the new key's group permissions.
+ * @cred: The credentials specifying UID namespace.
+ * @perm: The permissions mask of the new key.
+ * @flags: Flags specifying quota properties.
+ *
+ * Allocate a key of the specified type with the attributes given.  The key is
+ * returned in an uninstantiated state and the caller needs to instantiate the
+ * key before returning.
+ *
+ * The user's key count quota is updated to reflect the creation of the key and
+ * the user's key data quota has the default for the key type reserved.  The
+ * instantiation function should amend this as necessary.  If insufficient
+ * quota is available, -EDQUOT will be returned.
+ *
+ * The LSM security modules can prevent a key being created, in which case
+ * -EACCES will be returned.
+ *
+ * Returns a pointer to the new key if successful and an error code otherwise.
+ *
+ * Note that the caller needs to ensure the key type isn't unregistered.
+ * Internally this can be done by locking key_types_sem.  Externally, this can
+ * be done by either never unregistering the key type, or making sure
+ * key_alloc() calls don't race with module unloading.
+ */
+struct key *key_alloc(struct key_type *type, const char *desc,
+		      kuid_t uid, kgid_t gid, const struct cred *cred,
+		      key_perm_t perm, unsigned long flags)
+{
+	struct key_user *user = NULL;
+	struct key *key;
+	size_t desclen, quotalen;
+	int ret;
+
+	key = ERR_PTR(-EINVAL);
+	if (!desc || !*desc)
+		goto error;
+
+	if (type->vet_description) {
+		ret = type->vet_description(desc);
+		if (ret < 0) {
+			key = ERR_PTR(ret);
+			goto error;
+		}
+	}
+
+	desclen = strlen(desc);
+	quotalen = desclen + 1 + type->def_datalen;
+
+	/* get hold of the key tracking for this user */
+	user = key_user_lookup(uid);
+	if (!user)
+		goto no_memory_1;
+
+	/* check that the user's quota permits allocation of another key and
+	 * its description */
+	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+		unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
+			key_quota_root_maxkeys : key_quota_maxkeys;
+		unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
+			key_quota_root_maxbytes : key_quota_maxbytes;
+
+		spin_lock(&user->lock);
+		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
+			if (user->qnkeys + 1 >= maxkeys ||
+			    user->qnbytes + quotalen >= maxbytes ||
+			    user->qnbytes + quotalen < user->qnbytes)
+				goto no_quota;
+		}
+
+		user->qnkeys++;
+		user->qnbytes += quotalen;
+		spin_unlock(&user->lock);
+	}
+
+	/* allocate and initialise the key and its description */
+	key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
+	if (!key)
+		goto no_memory_2;
+
+	key->index_key.desc_len = desclen;
+	key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
+	if (!key->index_key.description)
+		goto no_memory_3;
+
+	atomic_set(&key->usage, 1);
+	init_rwsem(&key->sem);
+	lockdep_set_class(&key->sem, &type->lock_class);
+	key->index_key.type = type;
+	key->user = user;
+	key->quotalen = quotalen;
+	key->datalen = type->def_datalen;
+	key->uid = uid;
+	key->gid = gid;
+	key->perm = perm;
+
+	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
+		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+	if (flags & KEY_ALLOC_TRUSTED)
+		key->flags |= 1 << KEY_FLAG_TRUSTED;
+	if (flags & KEY_ALLOC_UID_KEYRING)
+		key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+
+#ifdef KEY_DEBUGGING
+	key->magic = KEY_DEBUG_MAGIC;
+#endif
+
+	/* let the security module know about the key */
+	ret = security_key_alloc(key, cred, flags);
+	if (ret < 0)
+		goto security_error;
+
+	/* publish the key by giving it a serial number */
+	atomic_inc(&user->nkeys);
+	key_alloc_serial(key);
+
+error:
+	return key;
+
+security_error:
+	kfree(key->description);
+	kmem_cache_free(key_jar, key);
+	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+		spin_lock(&user->lock);
+		user->qnkeys--;
+		user->qnbytes -= quotalen;
+		spin_unlock(&user->lock);
+	}
+	key_user_put(user);
+	key = ERR_PTR(ret);
+	goto error;
+
+no_memory_3:
+	kmem_cache_free(key_jar, key);
+no_memory_2:
+	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+		spin_lock(&user->lock);
+		user->qnkeys--;
+		user->qnbytes -= quotalen;
+		spin_unlock(&user->lock);
+	}
+	key_user_put(user);
+no_memory_1:
+	key = ERR_PTR(-ENOMEM);
+	goto error;
+
+no_quota:
+	spin_unlock(&user->lock);
+	key_user_put(user);
+	key = ERR_PTR(-EDQUOT);
+	goto error;
+}
+EXPORT_SYMBOL(key_alloc);
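+
+/* A minimal usage sketch (names and values illustrative, assuming the
+ * "user" key type is available): allocate an in-quota key owned by root,
+ * then instantiate it with a payload.
+ *
+ *	struct key *key;
+ *
+ *	key = key_alloc(&key_type_user, "ex:desc",
+ *			GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+ *			KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_IN_QUOTA);
+ *	if (IS_ERR(key))
+ *		return PTR_ERR(key);
+ *	ret = key_instantiate_and_link(key, payload, plen, NULL, NULL);
+ */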
+
+/**
+ * key_payload_reserve - Adjust data quota reservation for the key's payload
+ * @key: The key to make the reservation for.
+ * @datalen: The amount of data payload the caller now wants.
+ *
+ * Adjust the amount of the owning user's key data quota that a key reserves.
+ * If the amount is increased, then -EDQUOT may be returned if there isn't
+ * enough free quota available.
+ *
+ * If successful, 0 is returned.
+ */
+int key_payload_reserve(struct key *key, size_t datalen)
+{
+	int delta = (int)datalen - key->datalen;
+	int ret = 0;
+
+	key_check(key);
+
+	/* contemplate the quota adjustment */
+	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+		unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
+			key_quota_root_maxbytes : key_quota_maxbytes;
+
+		spin_lock(&key->user->lock);
+
+		if (delta > 0 &&
+		    (key->user->qnbytes + delta >= maxbytes ||
+		     key->user->qnbytes + delta < key->user->qnbytes)) {
+			ret = -EDQUOT;
+		} else {
+			key->user->qnbytes += delta;
+			key->quotalen += delta;
+		}
+		spin_unlock(&key->user->lock);
+	}
+
+	/* change the recorded data length if that didn't generate an error */
+	if (ret == 0)
+		key->datalen = datalen;
+
+	return ret;
+}
+EXPORT_SYMBOL(key_payload_reserve);
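+
+/* A sketch of the intended use from a key type's ->instantiate or ->update
+ * operation (payload and datalen illustrative):
+ *
+ *	ret = key_payload_reserve(key, datalen);
+ *	if (ret < 0)
+ *		return ret;
+ *	rcu_assign_keypointer(key, payload);
+ *
+ * The failure case is -EDQUOT, when the owner's byte quota cannot cover an
+ * increased reservation.
+ */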
+
+/*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+	/* Commit the payload before setting the state; barrier versus
+	 * key_read_state().
+	 */
+	smp_store_release(&key->state,
+			  (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
+/*
+ * Instantiate a key and link it into the target keyring atomically.  Must be
+ * called with the target keyring's semaphore writelocked.  The target key's
+ * semaphore need not be locked as instantiation is serialised by
+ * key_construction_mutex.
+ */
+static int __key_instantiate_and_link(struct key *key,
+				      struct key_preparsed_payload *prep,
+				      struct key *keyring,
+				      struct key *authkey,
+				      struct assoc_array_edit **_edit)
+{
+	int ret, awaken;
+
+	key_check(key);
+	key_check(keyring);
+
+	awaken = 0;
+	ret = -EBUSY;
+
+	mutex_lock(&key_construction_mutex);
+
+	/* can't instantiate twice */
+	if (key->state == KEY_IS_UNINSTANTIATED) {
+		/* instantiate the key */
+		ret = key->type->instantiate(key, prep);
+
+		if (ret == 0) {
+			/* mark the key as being instantiated */
+			atomic_inc(&key->user->nikeys);
+			mark_key_instantiated(key, 0);
+
+			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+				awaken = 1;
+
+			/* and link it into the destination keyring */
+			if (keyring)
+				__key_link(key, _edit);
+
+			/* disable the authorisation key */
+			if (authkey)
+				key_revoke(authkey);
+
+			if (prep->expiry != TIME_T_MAX) {
+				key->expiry = prep->expiry;
+				key_schedule_gc(prep->expiry + key_gc_delay);
+			}
+		}
+	}
+
+	mutex_unlock(&key_construction_mutex);
+
+	/* wake up anyone waiting for a key to be constructed */
+	if (awaken)
+		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
+
+	return ret;
+}
+
+/**
+ * key_instantiate_and_link - Instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @data: The data to use to instantiate the keyring.
+ * @datalen: The length of @data.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Instantiate a key that's in the uninstantiated state using the provided data
+ * and, if successful, link it in to the destination keyring if one is
+ * supplied.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up.  If the key was already instantiated,
+ * -EBUSY will be returned.
+ */
+int key_instantiate_and_link(struct key *key,
+			     const void *data,
+			     size_t datalen,
+			     struct key *keyring,
+			     struct key *authkey)
+{
+	struct key_preparsed_payload prep;
+	struct assoc_array_edit *edit;
+	int ret;
+
+	memset(&prep, 0, sizeof(prep));
+	prep.data = data;
+	prep.datalen = datalen;
+	prep.quotalen = key->type->def_datalen;
+	prep.expiry = TIME_T_MAX;
+	if (key->type->preparse) {
+		ret = key->type->preparse(&prep);
+		if (ret < 0)
+			goto error;
+	}
+
+	if (keyring) {
+		ret = __key_link_begin(keyring, &key->index_key, &edit);
+		if (ret < 0)
+			goto error;
+	}
+
+	ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
+
+	if (keyring)
+		__key_link_end(keyring, &key->index_key, edit);
+
+error:
+	if (key->type->preparse)
+		key->type->free_preparse(&prep);
+	return ret;
+}
+
+EXPORT_SYMBOL(key_instantiate_and_link);
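+
+/* Sketch: in a request_key() upcall the instantiating context typically
+ * passes both a destination keyring and the authorisation key (names
+ * illustrative):
+ *
+ *	ret = key_instantiate_and_link(key, payload, plen,
+ *				       dest_keyring, authkey);
+ *
+ * On success the authorisation key is revoked and any waiters are woken.
+ */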
+
+/**
+ * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @timeout: The timeout on the negative key.
+ * @error: The error to return when the key is hit.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Negatively instantiate a key that's in the uninstantiated state and, if
+ * successful, set its timeout and stored error and link it in to the
+ * destination keyring if one is supplied.  The key and any links to the key
+ * will be automatically garbage collected after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return the stored error code (typically ENOKEY) until the negative
+ * key expires.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up.  If the key was already instantiated,
+ * -EBUSY will be returned.
+ */
+int key_reject_and_link(struct key *key,
+			unsigned timeout,
+			unsigned error,
+			struct key *keyring,
+			struct key *authkey)
+{
+	struct assoc_array_edit *edit;
+	struct timespec now;
+	int ret, awaken, link_ret = 0;
+
+	key_check(key);
+	key_check(keyring);
+
+	awaken = 0;
+	ret = -EBUSY;
+
+	if (keyring)
+		link_ret = __key_link_begin(keyring, &key->index_key, &edit);
+
+	mutex_lock(&key_construction_mutex);
+
+	/* can't instantiate twice */
+	if (key->state == KEY_IS_UNINSTANTIATED) {
+		/* mark the key as being negatively instantiated */
+		atomic_inc(&key->user->nikeys);
+		mark_key_instantiated(key, -error);
+		now = current_kernel_time();
+		key->expiry = now.tv_sec + timeout;
+		key_schedule_gc(key->expiry + key_gc_delay);
+
+		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+			awaken = 1;
+
+		ret = 0;
+
+		/* and link it into the destination keyring */
+		if (keyring && link_ret == 0)
+			__key_link(key, &edit);
+
+		/* disable the authorisation key */
+		if (authkey)
+			key_revoke(authkey);
+	}
+
+	mutex_unlock(&key_construction_mutex);
+
+	if (keyring && link_ret == 0)
+		__key_link_end(keyring, &key->index_key, edit);
+
+	/* wake up anyone waiting for a key to be constructed */
+	if (awaken)
+		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
+
+	return ret == 0 ? link_ret : ret;
+}
+EXPORT_SYMBOL(key_reject_and_link);
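+
+/* Sketch: when construction fails, rejecting the key makes repeated
+ * request_key() calls fail fast for a while (the sixty-second timeout is
+ * illustrative):
+ *
+ *	ret = key_reject_and_link(key, 60, ENOKEY, dest_keyring, authkey);
+ */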
+
+/**
+ * key_put - Discard a reference to a key.
+ * @key: The key to discard a reference from.
+ *
+ * Discard a reference to a key, and when all the references are gone, we
+ * schedule the cleanup task to come and pull it out of the tree in process
+ * context at some later time.
+ */
+void key_put(struct key *key)
+{
+	if (key) {
+		key_check(key);
+
+		if (atomic_dec_and_test(&key->usage))
+			schedule_work(&key_gc_work);
+	}
+}
+EXPORT_SYMBOL(key_put);
+
+/*
+ * Find a key by its serial number.
+ */
+struct key *key_lookup(key_serial_t id)
+{
+	struct rb_node *n;
+	struct key *key;
+
+	spin_lock(&key_serial_lock);
+
+	/* search the tree for the specified key */
+	n = key_serial_tree.rb_node;
+	while (n) {
+		key = rb_entry(n, struct key, serial_node);
+
+		if (id < key->serial)
+			n = n->rb_left;
+		else if (id > key->serial)
+			n = n->rb_right;
+		else
+			goto found;
+	}
+
+not_found:
+	key = ERR_PTR(-ENOKEY);
+	goto error;
+
+found:
+	/* pretend it doesn't exist if it is awaiting deletion */
+	if (atomic_read(&key->usage) == 0)
+		goto not_found;
+
+	/* this races with key_put(), but that doesn't matter since key_put()
+	 * doesn't actually change the key
+	 */
+	__key_get(key);
+
+error:
+	spin_unlock(&key_serial_lock);
+	return key;
+}
+
+/*
+ * Find and lock the specified key type against removal.
+ *
+ * We return with the sem read-locked if successful.  If the type wasn't
+ * available, -ENOKEY is returned instead.
+ */
+struct key_type *key_type_lookup(const char *type)
+{
+	struct key_type *ktype;
+
+	down_read(&key_types_sem);
+
+	/* look up the key type to see if it's one of the registered kernel
+	 * types */
+	list_for_each_entry(ktype, &key_types_list, link) {
+		if (strcmp(ktype->name, type) == 0)
+			goto found_kernel_type;
+	}
+
+	up_read(&key_types_sem);
+	ktype = ERR_PTR(-ENOKEY);
+
+found_kernel_type:
+	return ktype;
+}
+
+void key_set_timeout(struct key *key, unsigned timeout)
+{
+	struct timespec now;
+	time_t expiry = 0;
+
+	/* make the changes with the locks held to prevent races */
+	down_write(&key->sem);
+
+	if (timeout > 0) {
+		now = current_kernel_time();
+		expiry = now.tv_sec + timeout;
+	}
+
+	key->expiry = expiry;
+	key_schedule_gc(key->expiry + key_gc_delay);
+
+	up_write(&key->sem);
+}
+EXPORT_SYMBOL_GPL(key_set_timeout);
+
+/*
+ * Unlock a key type locked by key_type_lookup().
+ */
+void key_type_put(struct key_type *ktype)
+{
+	up_read(&key_types_sem);
+}
+
+/*
+ * Attempt to update an existing key.
+ *
+ * The key is given to us with an incremented refcount that we need to discard
+ * if we get an error.
+ */
+static inline key_ref_t __key_update(key_ref_t key_ref,
+				     struct key_preparsed_payload *prep)
+{
+	struct key *key = key_ref_to_ptr(key_ref);
+	int ret;
+
+	/* need write permission on the key to update it */
+	ret = key_permission(key_ref, KEY_NEED_WRITE);
+	if (ret < 0)
+		goto error;
+
+	ret = -EEXIST;
+	if (!key->type->update)
+		goto error;
+
+	down_write(&key->sem);
+
+	ret = key->type->update(key, prep);
+	if (ret == 0)
+		/* Updating a negative key positively instantiates it */
+		mark_key_instantiated(key, 0);
+
+	up_write(&key->sem);
+
+	if (ret < 0)
+		goto error;
+out:
+	return key_ref;
+
+error:
+	key_put(key);
+	key_ref = ERR_PTR(ret);
+	goto out;
+}
+
+/**
+ * key_create_or_update - Update or create and instantiate a key.
+ * @keyring_ref: A pointer to the destination keyring with possession flag.
+ * @type: The type of key.
+ * @description: The searchable description for the key.
+ * @payload: The data to use to instantiate or update the key.
+ * @plen: The length of @payload.
+ * @perm: The permissions mask for a new key.
+ * @flags: The quota flags for a new key.
+ *
+ * Search the destination keyring for a key of the same description and if one
+ * is found, update it, otherwise create and instantiate a new one and create a
+ * link to it from that keyring.
+ *
+ * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
+ * concocted.
+ *
+ * Returns a pointer to the new key if successful, -ENODEV if the key type
+ * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
+ * caller isn't permitted to modify the keyring or the LSM did not permit
+ * creation of the key.
+ *
+ * On success, the possession flag from the keyring ref will be tacked on to
+ * the key ref before it is returned.
+ */
+key_ref_t key_create_or_update(key_ref_t keyring_ref,
+			       const char *type,
+			       const char *description,
+			       const void *payload,
+			       size_t plen,
+			       key_perm_t perm,
+			       unsigned long flags)
+{
+	struct keyring_index_key index_key = {
+		.description	= description,
+	};
+	struct key_preparsed_payload prep;
+	struct assoc_array_edit *edit;
+	const struct cred *cred = current_cred();
+	struct key *keyring, *key = NULL;
+	key_ref_t key_ref;
+	int ret;
+
+	/* look up the key type to see if it's one of the registered kernel
+	 * types */
+	index_key.type = key_type_lookup(type);
+	if (IS_ERR(index_key.type)) {
+		key_ref = ERR_PTR(-ENODEV);
+		goto error;
+	}
+
+	key_ref = ERR_PTR(-EINVAL);
+	if (!index_key.type->instantiate ||
+	    (!index_key.description && !index_key.type->preparse))
+		goto error_put_type;
+
+	keyring = key_ref_to_ptr(keyring_ref);
+
+	key_check(keyring);
+
+	key_ref = ERR_PTR(-ENOTDIR);
+	if (keyring->type != &key_type_keyring)
+		goto error_put_type;
+
+	memset(&prep, 0, sizeof(prep));
+	prep.data = payload;
+	prep.datalen = plen;
+	prep.quotalen = index_key.type->def_datalen;
+	prep.trusted = flags & KEY_ALLOC_TRUSTED;
+	prep.expiry = TIME_T_MAX;
+	if (index_key.type->preparse) {
+		ret = index_key.type->preparse(&prep);
+		if (ret < 0) {
+			key_ref = ERR_PTR(ret);
+			goto error_free_prep;
+		}
+		if (!index_key.description)
+			index_key.description = prep.description;
+		key_ref = ERR_PTR(-EINVAL);
+		if (!index_key.description)
+			goto error_free_prep;
+	}
+	index_key.desc_len = strlen(index_key.description);
+
+	key_ref = ERR_PTR(-EPERM);
+	if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
+		goto error_free_prep;
+	flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
+
+	ret = __key_link_begin(keyring, &index_key, &edit);
+	if (ret < 0) {
+		key_ref = ERR_PTR(ret);
+		goto error_free_prep;
+	}
+
+	/* if we're going to allocate a new key, we're going to have
+	 * to modify the keyring */
+	ret = key_permission(keyring_ref, KEY_NEED_WRITE);
+	if (ret < 0) {
+		key_ref = ERR_PTR(ret);
+		goto error_link_end;
+	}
+
+	/* if it's possible to update this type of key, search for an existing
+	 * key of the same type and description in the destination keyring and
+	 * update that instead if possible
+	 */
+	if (index_key.type->update) {
+		key_ref = find_key_to_update(keyring_ref, &index_key);
+		if (key_ref)
+			goto found_matching_key;
+	}
+
+	/* if the client doesn't provide, decide on the permissions we want */
+	if (perm == KEY_PERM_UNDEF) {
+		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+		perm |= KEY_USR_VIEW;
+
+		if (index_key.type->read)
+			perm |= KEY_POS_READ;
+
+		if (index_key.type == &key_type_keyring ||
+		    index_key.type->update)
+			perm |= KEY_POS_WRITE;
+	}
+
+	/* allocate a new key */
+	key = key_alloc(index_key.type, index_key.description,
+			cred->fsuid, cred->fsgid, cred, perm, flags);
+	if (IS_ERR(key)) {
+		key_ref = ERR_CAST(key);
+		goto error_link_end;
+	}
+
+	/* instantiate it and link it into the target keyring */
+	ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
+	if (ret < 0) {
+		key_put(key);
+		key_ref = ERR_PTR(ret);
+		goto error_link_end;
+	}
+
+	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
+
+error_link_end:
+	__key_link_end(keyring, &index_key, edit);
+error_free_prep:
+	if (index_key.type->preparse)
+		index_key.type->free_preparse(&prep);
+error_put_type:
+	key_type_put(index_key.type);
+error:
+	return key_ref;
+
+ found_matching_key:
+	/* we found a matching key, so we're going to try to update it
+	 * - we can drop the locks first as we have the key pinned
+	 */
+	__key_link_end(keyring, &index_key, edit);
+
+	key = key_ref_to_ptr(key_ref);
+	if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+		ret = wait_for_key_construction(key, true);
+		if (ret < 0) {
+			key_ref_put(key_ref);
+			key_ref = ERR_PTR(ret);
+			goto error_free_prep;
+		}
+	}
+
+	key_ref = __key_update(key_ref, &prep);
+	goto error_free_prep;
+}
+EXPORT_SYMBOL(key_create_or_update);
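+
+/* Sketch: the add_key() system call in keyctl.c reduces to a call of this
+ * form once the payload has been copied into kernel space (arguments
+ * illustrative):
+ *
+ *	key_ref = key_create_or_update(keyring_ref, "user", "ex:desc",
+ *				       payload, plen, KEY_PERM_UNDEF,
+ *				       KEY_ALLOC_IN_QUOTA);
+ *	if (!IS_ERR(key_ref))
+ *		key_ref_put(key_ref);
+ */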
+
+/**
+ * key_update - Update a key's contents.
+ * @key_ref: The pointer (plus possession flag) to the key.
+ * @payload: The data to be used to update the key.
+ * @plen: The length of @payload.
+ *
+ * Attempt to update the contents of a key with the given payload data.  The
+ * caller must be granted Write permission on the key.  Negative keys can be
+ * instantiated by this method.
+ *
+ * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
+ * type does not support updating.  The key type may return other errors.
+ */
+int key_update(key_ref_t key_ref, const void *payload, size_t plen)
+{
+	struct key_preparsed_payload prep;
+	struct key *key = key_ref_to_ptr(key_ref);
+	int ret;
+
+	key_check(key);
+
+	/* the key must be writable */
+	ret = key_permission(key_ref, KEY_NEED_WRITE);
+	if (ret < 0)
+		return ret;
+
+	/* attempt to update it if supported */
+	if (!key->type->update)
+		return -EOPNOTSUPP;
+
+	memset(&prep, 0, sizeof(prep));
+	prep.data = payload;
+	prep.datalen = plen;
+	prep.quotalen = key->type->def_datalen;
+	prep.expiry = TIME_T_MAX;
+	if (key->type->preparse) {
+		ret = key->type->preparse(&prep);
+		if (ret < 0)
+			goto error;
+	}
+
+	down_write(&key->sem);
+
+	ret = key->type->update(key, &prep);
+	if (ret == 0)
+		/* Updating a negative key positively instantiates it */
+		mark_key_instantiated(key, 0);
+
+	up_write(&key->sem);
+
+error:
+	if (key->type->preparse)
+		key->type->free_preparse(&prep);
+	return ret;
+}
+EXPORT_SYMBOL(key_update);
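+
+/* Sketch: given a reference looked up with Write permission (as in
+ * keyctl_update_key() in keyctl.c), an update is a single call:
+ *
+ *	key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
+ *	if (!IS_ERR(key_ref))
+ *		ret = key_update(key_ref, payload, plen);
+ */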
+
+/**
+ * key_revoke - Revoke a key.
+ * @key: The key to be revoked.
+ *
+ * Mark a key as being revoked and ask the type to free up its resources.  The
+ * revocation timeout is set and the key and all its links will be
+ * automatically garbage collected after key_gc_delay amount of time if they
+ * are not manually dealt with first.
+ */
+void key_revoke(struct key *key)
+{
+	struct timespec now;
+	time_t time;
+
+	key_check(key);
+
+	/* make sure no one's trying to change or use the key when we mark it
+	 * - we tell lockdep that we might nest because we might be revoking an
+	 *   authorisation key whilst holding the sem on a key we've just
+	 *   instantiated
+	 */
+	down_write_nested(&key->sem, 1);
+	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
+	    key->type->revoke)
+		key->type->revoke(key);
+
+	/* set the death time to no more than the expiry time */
+	now = current_kernel_time();
+	time = now.tv_sec;
+	if (key->revoked_at == 0 || key->revoked_at > time) {
+		key->revoked_at = time;
+		key_schedule_gc(key->revoked_at + key_gc_delay);
+	}
+
+	up_write(&key->sem);
+}
+EXPORT_SYMBOL(key_revoke);
+
+/**
+ * key_invalidate - Invalidate a key.
+ * @key: The key to be invalidated.
+ *
+ * Mark a key as being invalidated and have it cleaned up immediately.  The key
+ * is ignored by all searches and other operations from this point.
+ */
+void key_invalidate(struct key *key)
+{
+	kenter("%d", key_serial(key));
+
+	key_check(key);
+
+	if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+		down_write_nested(&key->sem, 1);
+		if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
+			key_schedule_gc_links();
+		up_write(&key->sem);
+	}
+}
+EXPORT_SYMBOL(key_invalidate);
+
+/**
+ * generic_key_instantiate - Simple instantiation of a key from preparsed data
+ * @key: The key to be instantiated
+ * @prep: The preparsed data to load.
+ *
+ * Instantiate a key from preparsed data.  We assume we can just copy the data
+ * in directly and clear the old pointers.
+ *
+ * This can be pointed to directly by the key type instantiate op pointer.
+ */
+int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+	int ret;
+
+	pr_devel("==>%s()\n", __func__);
+
+	ret = key_payload_reserve(key, prep->quotalen);
+	if (ret == 0) {
+		rcu_assign_keypointer(key, prep->payload.data[0]);
+		key->payload.data[1] = prep->payload.data[1];
+		key->payload.data[2] = prep->payload.data[2];
+		key->payload.data[3] = prep->payload.data[3];
+		prep->payload.data[0] = NULL;
+		prep->payload.data[1] = NULL;
+		prep->payload.data[2] = NULL;
+		prep->payload.data[3] = NULL;
+	}
+	pr_devel("<==%s() = %d\n", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL(generic_key_instantiate);
+
+/**
+ * register_key_type - Register a type of key.
+ * @ktype: The new key type.
+ *
+ * Register a new key type.
+ *
+ * Returns 0 on success or -EEXIST if a type of this name already exists.
+ */
+int register_key_type(struct key_type *ktype)
+{
+	struct key_type *p;
+	int ret;
+
+	memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
+
+	ret = -EEXIST;
+	down_write(&key_types_sem);
+
+	/* disallow key types with the same name */
+	list_for_each_entry(p, &key_types_list, link) {
+		if (strcmp(p->name, ktype->name) == 0)
+			goto out;
+	}
+
+	/* store the type */
+	list_add(&ktype->link, &key_types_list);
+
+	pr_notice("Key type %s registered\n", ktype->name);
+	ret = 0;
+
+out:
+	up_write(&key_types_sem);
+	return ret;
+}
+EXPORT_SYMBOL(register_key_type);
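+
+/* Sketch of a minimal key type and its registration (the type itself is
+ * illustrative; a real type normally also supplies ->describe, ->destroy
+ * and, where needed, ->preparse/->free_preparse):
+ *
+ *	static struct key_type key_type_example = {
+ *		.name		= "example",
+ *		.instantiate	= generic_key_instantiate,
+ *	};
+ *
+ *	ret = register_key_type(&key_type_example);
+ *	...
+ *	unregister_key_type(&key_type_example);
+ */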
+
+/**
+ * unregister_key_type - Unregister a type of key.
+ * @ktype: The key type.
+ *
+ * Unregister a key type and mark all the extant keys of this type as dead.
+ * Those keys of this type are then destroyed to get rid of their payloads and
+ * they and their links will be garbage collected as soon as possible.
+ */
+void unregister_key_type(struct key_type *ktype)
+{
+	down_write(&key_types_sem);
+	list_del_init(&ktype->link);
+	downgrade_write(&key_types_sem);
+	key_gc_keytype(ktype);
+	pr_notice("Key type %s unregistered\n", ktype->name);
+	up_read(&key_types_sem);
+}
+EXPORT_SYMBOL(unregister_key_type);
+
+/*
+ * Initialise the key management state.
+ */
+void __init key_init(void)
+{
+	/* allocate a slab in which we can store keys */
+	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+
+	/* add the special key types */
+	list_add_tail(&key_type_keyring.link, &key_types_list);
+	list_add_tail(&key_type_dead.link, &key_types_list);
+	list_add_tail(&key_type_user.link, &key_types_list);
+	list_add_tail(&key_type_logon.link, &key_types_list);
+
+	/* record the root user tracking */
+	rb_link_node(&root_key_user.node,
+		     NULL,
+		     &key_user_tree.rb_node);
+
+	rb_insert_color(&root_key_user.node,
+			&key_user_tree);
+}
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
new file mode 100644
index 0000000..2e741e1
--- /dev/null
+++ b/security/keys/keyctl.c
@@ -0,0 +1,1665 @@
+/* Userspace key control operations
+ *
+ * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/fs.h>
+#include <linux/capability.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
+#include <linux/uio.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+#define KEY_MAX_DESC_SIZE 4096
+
+static int key_get_type_from_user(char *type,
+				  const char __user *_type,
+				  unsigned len)
+{
+	int ret;
+
+	ret = strncpy_from_user(type, _type, len);
+	if (ret < 0)
+		return ret;
+	if (ret == 0 || ret >= len)
+		return -EINVAL;
+	if (type[0] == '.')
+		return -EPERM;
+	type[len - 1] = '\0';
+	return 0;
+}
+
+/*
+ * Extract the description of a new key from userspace and either add it as a
+ * new key to the specified keyring or update a matching key in that keyring.
+ *
+ * If the description is NULL or an empty string, the key type is asked to
+ * generate one from the payload.
+ *
+ * The keyring must be writable so that we can attach the key to it.
+ *
+ * If successful, the new key's serial number is returned, otherwise an error
+ * code is returned.
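+ *
+ * From userspace this is normally reached through the add_key(2) wrapper in
+ * libkeyutils, for example (type, description and payload illustrative):
+ *
+ *	key_serial_t id = add_key("user", "ex:desc", "secret", 6,
+ *				  KEY_SPEC_SESSION_KEYRING);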
+ */
+SYSCALL_DEFINE5(add_key, const char __user *, _type,
+		const char __user *, _description,
+		const void __user *, _payload,
+		size_t, plen,
+		key_serial_t, ringid)
+{
+	key_ref_t keyring_ref, key_ref;
+	char type[32], *description;
+	void *payload;
+	long ret;
+
+	ret = -EINVAL;
+	if (plen > 1024 * 1024 - 1)
+		goto error;
+
+	/* draw all the data into kernel space */
+	ret = key_get_type_from_user(type, _type, sizeof(type));
+	if (ret < 0)
+		goto error;
+
+	description = NULL;
+	if (_description) {
+		description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+		if (IS_ERR(description)) {
+			ret = PTR_ERR(description);
+			goto error;
+		}
+		if (!*description) {
+			kfree(description);
+			description = NULL;
+		} else if ((description[0] == '.') &&
+			   (strncmp(type, "keyring", 7) == 0)) {
+			ret = -EPERM;
+			goto error2;
+		}
+	}
+
+	/* pull the payload in if one was supplied */
+	payload = NULL;
+
+	if (plen) {
+		ret = -ENOMEM;
+		payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
+		if (!payload) {
+			if (plen <= PAGE_SIZE)
+				goto error2;
+			payload = vmalloc(plen);
+			if (!payload)
+				goto error2;
+		}
+
+		ret = -EFAULT;
+		if (copy_from_user(payload, _payload, plen) != 0)
+			goto error3;
+	}
+
+	/* find the target keyring (which must be writable) */
+	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+	if (IS_ERR(keyring_ref)) {
+		ret = PTR_ERR(keyring_ref);
+		goto error3;
+	}
+
+	/* create or update the requested key and add it to the target
+	 * keyring */
+	key_ref = key_create_or_update(keyring_ref, type, description,
+				       payload, plen, KEY_PERM_UNDEF,
+				       KEY_ALLOC_IN_QUOTA);
+	if (!IS_ERR(key_ref)) {
+		ret = key_ref_to_ptr(key_ref)->serial;
+		key_ref_put(key_ref);
+	} else {
+		ret = PTR_ERR(key_ref);
+	}
+
+	key_ref_put(keyring_ref);
+ error3:
+	kvfree(payload);
+ error2:
+	kfree(description);
+ error:
+	return ret;
+}
+
+/*
+ * Search the process keyrings and keyring trees linked from those for a
+ * matching key.  Keyrings must have appropriate Search permission to be
+ * searched.
+ *
+ * If a key is found, it will be attached to the destination keyring if there's
+ * one specified and the serial number of the key will be returned.
+ *
+ * If no key is found, /sbin/request-key will be invoked if _callout_info is
+ * non-NULL in an attempt to create a key.  The _callout_info string will be
+ * passed to /sbin/request-key to aid with completing the request.  If the
+ * _callout_info string is "" then it will be changed to "-".
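+ *
+ * Userspace normally reaches this through the request_key(2) wrapper in
+ * libkeyutils, for example (arguments illustrative):
+ *
+ *	key_serial_t id = request_key("user", "ex:desc", "callout data",
+ *				      KEY_SPEC_SESSION_KEYRING);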
+ */
+SYSCALL_DEFINE4(request_key, const char __user *, _type,
+		const char __user *, _description,
+		const char __user *, _callout_info,
+		key_serial_t, destringid)
+{
+	struct key_type *ktype;
+	struct key *key;
+	key_ref_t dest_ref;
+	size_t callout_len;
+	char type[32], *description, *callout_info;
+	long ret;
+
+	/* pull the type into kernel space */
+	ret = key_get_type_from_user(type, _type, sizeof(type));
+	if (ret < 0)
+		goto error;
+
+	/* pull the description into kernel space */
+	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+	if (IS_ERR(description)) {
+		ret = PTR_ERR(description);
+		goto error;
+	}
+
+	/* pull the callout info into kernel space */
+	callout_info = NULL;
+	callout_len = 0;
+	if (_callout_info) {
+		callout_info = strndup_user(_callout_info, PAGE_SIZE);
+		if (IS_ERR(callout_info)) {
+			ret = PTR_ERR(callout_info);
+			goto error2;
+		}
+		callout_len = strlen(callout_info);
+	}
+
+	/* get the destination keyring if specified */
+	dest_ref = NULL;
+	if (destringid) {
+		dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
+					   KEY_NEED_WRITE);
+		if (IS_ERR(dest_ref)) {
+			ret = PTR_ERR(dest_ref);
+			goto error3;
+		}
+	}
+
+	/* find the key type */
+	ktype = key_type_lookup(type);
+	if (IS_ERR(ktype)) {
+		ret = PTR_ERR(ktype);
+		goto error4;
+	}
+
+	/* do the search */
+	key = request_key_and_link(ktype, description, callout_info,
+				   callout_len, NULL, key_ref_to_ptr(dest_ref),
+				   KEY_ALLOC_IN_QUOTA);
+	if (IS_ERR(key)) {
+		ret = PTR_ERR(key);
+		goto error5;
+	}
+
+	/* wait for the key to finish being constructed */
+	ret = wait_for_key_construction(key, 1);
+	if (ret < 0)
+		goto error6;
+
+	ret = key->serial;
+
+error6:
+	key_put(key);
+error5:
+	key_type_put(ktype);
+error4:
+	key_ref_put(dest_ref);
+error3:
+	kfree(callout_info);
+error2:
+	kfree(description);
+error:
+	return ret;
+}
+
+/*
+ * Get the ID of the specified process keyring.
+ *
+ * The requested keyring must have search permission to be found.
+ *
+ * If successful, the ID of the requested keyring will be returned.
+ */
+long keyctl_get_keyring_ID(key_serial_t id, int create)
+{
+	key_ref_t key_ref;
+	unsigned long lflags;
+	long ret;
+
+	lflags = create ? KEY_LOOKUP_CREATE : 0;
+	key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+		goto error;
+	}
+
+	ret = key_ref_to_ptr(key_ref)->serial;
+	key_ref_put(key_ref);
+error:
+	return ret;
+}
+
+/*
+ * Join a (named) session keyring.
+ *
+ * Create and join an anonymous session keyring or join a named session
+ * keyring, creating it if necessary.  A named session keyring must have Search
+ * permission for it to be joined.  Session keyrings without this permit will
+ * be skipped over.  It is not permitted for userspace to create or join
+ * keyrings whose name begins with a dot.
+ *
+ * If successful, the ID of the joined session keyring will be returned.
+ */
+long keyctl_join_session_keyring(const char __user *_name)
+{
+	char *name;
+	long ret;
+
+	/* fetch the name from userspace */
+	name = NULL;
+	if (_name) {
+		name = strndup_user(_name, KEY_MAX_DESC_SIZE);
+		if (IS_ERR(name)) {
+			ret = PTR_ERR(name);
+			goto error;
+		}
+
+		ret = -EPERM;
+		if (name[0] == '.')
+			goto error_name;
+	}
+
+	/* join the session */
+	ret = join_session_keyring(name);
+error_name:
+	kfree(name);
+error:
+	return ret;
+}
+
+/*
+ * Update a key's data payload from the given data.
+ *
+ * The key must grant the caller Write permission and the key type must support
+ * updating for this to work.  A negative key can be positively instantiated
+ * with this call.
+ *
+ * If successful, 0 will be returned.  If the key type does not support
+ * updating, then -EOPNOTSUPP will be returned.
+ */
+long keyctl_update_key(key_serial_t id,
+		       const void __user *_payload,
+		       size_t plen)
+{
+	key_ref_t key_ref;
+	void *payload;
+	long ret;
+
+	ret = -EINVAL;
+	if (plen > PAGE_SIZE)
+		goto error;
+
+	/* pull the payload in if one was supplied */
+	payload = NULL;
+	if (plen) {
+		ret = -ENOMEM;
+		payload = kmalloc(plen, GFP_KERNEL);
+		if (!payload)
+			goto error;
+
+		ret = -EFAULT;
+		if (copy_from_user(payload, _payload, plen) != 0)
+			goto error2;
+	}
+
+	/* find the target key (which must be writable) */
+	key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+		goto error2;
+	}
+
+	/* update the key */
+	ret = key_update(key_ref, payload, plen);
+
+	key_ref_put(key_ref);
+error2:
+	kfree(payload);
+error:
+	return ret;
+}
+
+/*
+ * Revoke a key.
+ *
+ * The key must grant the caller Write or Setattr permission for this to
+ * work.  The key type should give up its quota claim when revoked.  The key
+ * and any links to the key will be automatically garbage collected after a
+ * certain amount of time (/proc/sys/kernel/keys/gc_delay).
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_revoke_key(key_serial_t id)
+{
+	key_ref_t key_ref;
+	long ret;
+
+	key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+		if (ret != -EACCES)
+			goto error;
+		key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
+		if (IS_ERR(key_ref)) {
+			ret = PTR_ERR(key_ref);
+			goto error;
+		}
+	}
+
+	key_revoke(key_ref_to_ptr(key_ref));
+	ret = 0;
+
+	key_ref_put(key_ref);
+error:
+	return ret;
+}
+
+/*
+ * Invalidate a key.
+ *
+ * The key must grant the caller Invalidate permission for this to work.
+ * The key and any links to the key will be automatically garbage collected
+ * immediately.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_invalidate_key(key_serial_t id)
+{
+	key_ref_t key_ref;
+	long ret;
+
+	kenter("%d", id);
+
+	key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+
+		/* Root is permitted to invalidate certain special keys */
+		if (capable(CAP_SYS_ADMIN)) {
+			key_ref = lookup_user_key(id, 0, 0);
+			if (IS_ERR(key_ref))
+				goto error;
+			if (test_bit(KEY_FLAG_ROOT_CAN_INVAL,
+				     &key_ref_to_ptr(key_ref)->flags))
+				goto invalidate;
+			goto error_put;
+		}
+
+		goto error;
+	}
+
+invalidate:
+	key_invalidate(key_ref_to_ptr(key_ref));
+	ret = 0;
+error_put:
+	key_ref_put(key_ref);
+error:
+	kleave(" = %ld", ret);
+	return ret;
+}
+
+/*
+ * Clear the specified keyring, creating an empty process keyring if one of the
+ * special keyring IDs is used.
+ *
+ * The keyring must grant the caller Write permission for this to work.  If
+ * successful, 0 will be returned.
+ */
+long keyctl_keyring_clear(key_serial_t ringid)
+{
+	key_ref_t keyring_ref;
+	long ret;
+
+	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+	if (IS_ERR(keyring_ref)) {
+		ret = PTR_ERR(keyring_ref);
+
+		/* Root is permitted to invalidate certain special keyrings */
+		if (capable(CAP_SYS_ADMIN)) {
+			keyring_ref = lookup_user_key(ringid, 0, 0);
+			if (IS_ERR(keyring_ref))
+				goto error;
+			if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
+				     &key_ref_to_ptr(keyring_ref)->flags))
+				goto clear;
+			goto error_put;
+		}
+
+		goto error;
+	}
+
+clear:
+	ret = keyring_clear(key_ref_to_ptr(keyring_ref));
+error_put:
+	key_ref_put(keyring_ref);
+error:
+	return ret;
+}
+
+/*
+ * Create a link from a keyring to a key if there's no matching key in the
+ * keyring, otherwise replace the link to the matching key with a link to the
+ * new key.
+ *
+ * The key must grant the caller Link permission and the keyring must grant
+ * the caller Write permission.  Furthermore, if an additional link is created,
+ * the keyring's quota will be extended.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
+{
+	key_ref_t keyring_ref, key_ref;
+	long ret;
+
+	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+	if (IS_ERR(keyring_ref)) {
+		ret = PTR_ERR(keyring_ref);
+		goto error;
+	}
+
+	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+		goto error2;
+	}
+
+	ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
+
+	key_ref_put(key_ref);
+error2:
+	key_ref_put(keyring_ref);
+error:
+	return ret;
+}
+
+/*
+ * Unlink a key from a keyring.
+ *
+ * The keyring must grant the caller Write permission for this to work; the key
+ * itself need not grant the caller anything.  If the last link to a key is
+ * removed then that key will be scheduled for destruction.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
+{
+	key_ref_t keyring_ref, key_ref;
+	long ret;
+
+	keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
+	if (IS_ERR(keyring_ref)) {
+		ret = PTR_ERR(keyring_ref);
+		goto error;
+	}
+
+	key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+		goto error2;
+	}
+
+	ret = key_unlink(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
+
+	key_ref_put(key_ref);
+error2:
+	key_ref_put(keyring_ref);
+error:
+	return ret;
+}
+
+/*
+ * Return a description of a key to userspace.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, we place up to buflen bytes of data into it formatted
+ * in the following way:
+ *
+ *	type;uid;gid;perm;description<NUL>
+ *
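+ * For example, a "user" key might be described as (field values
+ * illustrative):
+ *
+ *	user;1000;1000;3f010000;ex:desc
+ *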
+ * If successful, we return the amount of description available, irrespective
+ * of how much we may have copied into the buffer.
+ */
+long keyctl_describe_key(key_serial_t keyid,
+			 char __user *buffer,
+			 size_t buflen)
+{
+	struct key *key, *instkey;
+	key_ref_t key_ref;
+	char *infobuf;
+	long ret;
+	int desclen, infolen;
+
+	key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
+	if (IS_ERR(key_ref)) {
+		/* viewing a key under construction is permitted if we have the
+		 * authorisation token handy */
+		if (PTR_ERR(key_ref) == -EACCES) {
+			instkey = key_get_instantiation_authkey(keyid);
+			if (!IS_ERR(instkey)) {
+				key_put(instkey);
+				key_ref = lookup_user_key(keyid,
+							  KEY_LOOKUP_PARTIAL,
+							  0);
+				if (!IS_ERR(key_ref))
+					goto okay;
+			}
+		}
+
+		ret = PTR_ERR(key_ref);
+		goto error;
+	}
+
+okay:
+	key = key_ref_to_ptr(key_ref);
+	desclen = strlen(key->description);
+
+	/* calculate how much information we're going to return */
+	ret = -ENOMEM;
+	infobuf = kasprintf(GFP_KERNEL,
+			    "%s;%d;%d;%08x;",
+			    key->type->name,
+			    from_kuid_munged(current_user_ns(), key->uid),
+			    from_kgid_munged(current_user_ns(), key->gid),
+			    key->perm);
+	if (!infobuf)
+		goto error2;
+	infolen = strlen(infobuf);
+	ret = infolen + desclen + 1;
+
+	/* consider returning the data */
+	if (buffer && buflen >= ret) {
+		if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+		    copy_to_user(buffer + infolen, key->description,
+				 desclen + 1) != 0)
+			ret = -EFAULT;
+	}
+
+	kfree(infobuf);
+error2:
+	key_ref_put(key_ref);
+error:
+	return ret;
+}
+
+/*
+ * Search the specified keyring and any keyrings it links to for a matching
+ * key.  Only keyrings that grant the caller Search permission will be searched
+ * (this includes the starting keyring).  Only keys with Search permission can
+ * be found.
+ *
+ * If successful, the found key will be linked to the destination keyring if
+ * supplied and the key has Link permission, and the found key ID will be
+ * returned.
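+ *
+ * Userspace reaches this through keyctl(2), for example (arguments
+ * illustrative):
+ *
+ *	keyctl(KEYCTL_SEARCH, KEY_SPEC_SESSION_KEYRING, "user", "ex:desc", 0);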
+ */
+long keyctl_keyring_search(key_serial_t ringid,
+			   const char __user *_type,
+			   const char __user *_description,
+			   key_serial_t destringid)
+{
+	struct key_type *ktype;
+	key_ref_t keyring_ref, key_ref, dest_ref;
+	char type[32], *description;
+	long ret;
+
+	/* pull the type and description into kernel space */
+	ret = key_get_type_from_user(type, _type, sizeof(type));
+	if (ret < 0)
+		goto error;
+
+	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+	if (IS_ERR(description)) {
+		ret = PTR_ERR(description);
+		goto error;
+	}
+
+	/* get the keyring at which to begin the search */
+	keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
+	if (IS_ERR(keyring_ref)) {
+		ret = PTR_ERR(keyring_ref);
+		goto error2;
+	}
+
+	/* get the destination keyring if specified */
+	dest_ref = NULL;
+	if (destringid) {
+		dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
+					   KEY_NEED_WRITE);
+		if (IS_ERR(dest_ref)) {
+			ret = PTR_ERR(dest_ref);
+			goto error3;
+		}
+	}
+
+	/* find the key type */
+	ktype = key_type_lookup(type);
+	if (IS_ERR(ktype)) {
+		ret = PTR_ERR(ktype);
+		goto error4;
+	}
+
+	/* do the search */
+	key_ref = keyring_search(keyring_ref, ktype, description);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+
+		/* treat lack or presence of a negative key the same */
+		if (ret == -EAGAIN)
+			ret = -ENOKEY;
+		goto error5;
+	}
+
+	/* link the resulting key to the destination keyring if we can */
+	if (dest_ref) {
+		ret = key_permission(key_ref, KEY_NEED_LINK);
+		if (ret < 0)
+			goto error6;
+
+		ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
+		if (ret < 0)
+			goto error6;
+	}
+
+	ret = key_ref_to_ptr(key_ref)->serial;
+
+error6:
+	key_ref_put(key_ref);
+error5:
+	key_type_put(ktype);
+error4:
+	key_ref_put(dest_ref);
+error3:
+	key_ref_put(keyring_ref);
+error2:
+	kfree(description);
+error:
+	return ret;
+}
+
+/*
+ * Read a key's payload.
+ *
+ * The key must either grant the caller Read permission, or it must grant the
+ * caller Search permission when searched for from the process keyrings.
+ *
+ * If successful, we place up to buflen bytes of data into the buffer, if one
+ * is provided, and return the amount of data that is available in the key,
+ * irrespective of how much we copied into the buffer.
+ */
+long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+{
+	struct key *key;
+	key_ref_t key_ref;
+	long ret;
+
+	/* find the key first */
+	key_ref = lookup_user_key(keyid, 0, 0);
+	if (IS_ERR(key_ref)) {
+		ret = -ENOKEY;
+		goto error;
+	}
+
+	key = key_ref_to_ptr(key_ref);
+
+	ret = key_read_state(key);
+	if (ret < 0)
+		goto error2; /* Negatively instantiated */
+
+	/* see if we can read it directly */
+	ret = key_permission(key_ref, KEY_NEED_READ);
+	if (ret == 0)
+		goto can_read_key;
+	if (ret != -EACCES)
+		goto error2; /* must not leak the key reference */
+
+	/* we can't; see if it's searchable from this process's keyrings
+	 * - we automatically take account of the fact that it may be
+	 *   dangling off an instantiation key
+	 */
+	if (!is_key_possessed(key_ref)) {
+		ret = -EACCES;
+		goto error2;
+	}
+
+	/* the key is probably readable - now try to read it */
+can_read_key:
+	ret = -EOPNOTSUPP;
+	if (key->type->read) {
+		/* Read the data with the semaphore held (since we might sleep)
+		 * to protect against the key being updated or revoked.
+		 */
+		down_read(&key->sem);
+		ret = key_validate(key);
+		if (ret == 0)
+			ret = key->type->read(key, buffer, buflen);
+		up_read(&key->sem);
+	}
+
+error2:
+	key_put(key);
+error:
+	return ret;
+}
+
+/*
+ * Change the ownership of a key
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet.  For the UID to be changed, or
+ * for the GID to be changed to a group the caller is not a member of, the
+ * caller must have sysadmin capability.  If either uid or gid is -1 then that
+ * attribute is not changed.
+ *
+ * If the UID is to be changed, the new user must have sufficient quota to
+ * accept the key.  The quota deduction will be transferred from the old
+ * user to the new user should the UID be changed.
+ *
+ * If successful, 0 will be returned.
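+ *
+ * Userspace reaches this through keyctl(2), e.g. keyctl(KEYCTL_CHOWN, id,
+ * uid, gid), passing -1 for whichever attribute should be left unchanged.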
+ */
+long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
+{
+	struct key_user *newowner, *zapowner = NULL;
+	struct key *key;
+	key_ref_t key_ref;
+	long ret;
+	kuid_t uid;
+	kgid_t gid;
+
+	uid = make_kuid(current_user_ns(), user);
+	gid = make_kgid(current_user_ns(), group);
+	ret = -EINVAL;
+	if ((user != (uid_t) -1) && !uid_valid(uid))
+		goto error;
+	if ((group != (gid_t) -1) && !gid_valid(gid))
+		goto error;
+
+	ret = 0;
+	if (user == (uid_t) -1 && group == (gid_t) -1)
+		goto error;
+
+	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+				  KEY_NEED_SETATTR);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+		goto error;
+	}
+
+	key = key_ref_to_ptr(key_ref);
+
+	/* make the changes with the locks held to prevent chown/chown races */
+	ret = -EACCES;
+	down_write(&key->sem);
+
+	if (!capable(CAP_SYS_ADMIN)) {
+		/* only the sysadmin can chown a key to some other UID */
+		if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
+			goto error_put;
+
+		/* only the sysadmin can set the key's GID to a group other
+		 * than one of those that the current process subscribes to */
+		if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
+			goto error_put;
+	}
+
+	/* change the UID */
+	if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
+		ret = -ENOMEM;
+		newowner = key_user_lookup(uid);
+		if (!newowner)
+			goto error_put;
+
+		/* transfer the quota burden to the new user */
+		if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+			unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
+				key_quota_root_maxkeys : key_quota_maxkeys;
+			unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
+				key_quota_root_maxbytes : key_quota_maxbytes;
+
+			spin_lock(&newowner->lock);
+			if (newowner->qnkeys + 1 >= maxkeys ||
+			    newowner->qnbytes + key->quotalen >= maxbytes ||
+			    newowner->qnbytes + key->quotalen <
+			    newowner->qnbytes)
+				goto quota_overrun;
+
+			newowner->qnkeys++;
+			newowner->qnbytes += key->quotalen;
+			spin_unlock(&newowner->lock);
+
+			spin_lock(&key->user->lock);
+			key->user->qnkeys--;
+			key->user->qnbytes -= key->quotalen;
+			spin_unlock(&key->user->lock);
+		}
+
+		atomic_dec(&key->user->nkeys);
+		atomic_inc(&newowner->nkeys);
+
+		if (key->state != KEY_IS_UNINSTANTIATED) {
+			atomic_dec(&key->user->nikeys);
+			atomic_inc(&newowner->nikeys);
+		}
+
+		zapowner = key->user;
+		key->user = newowner;
+		key->uid = uid;
+	}
+
+	/* change the GID */
+	if (group != (gid_t) -1)
+		key->gid = gid;
+
+	ret = 0;
+
+error_put:
+	up_write(&key->sem);
+	key_put(key);
+	if (zapowner)
+		key_user_put(zapowner);
+error:
+	return ret;
+
+quota_overrun:
+	spin_unlock(&newowner->lock);
+	zapowner = newowner;
+	ret = -EDQUOT;
+	goto error_put;
+}
+
+/*
+ * Change the permission mask on a key.
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet.  If the caller does not have
+ * sysadmin capability, it may only change the permission on keys that it owns.
+ */
+long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
+{
+	struct key *key;
+	key_ref_t key_ref;
+	long ret;
+
+	ret = -EINVAL;
+	if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
+		goto error;
+
+	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+				  KEY_NEED_SETATTR);
+	if (IS_ERR(key_ref)) {
+		ret = PTR_ERR(key_ref);
+		goto error;
+	}
+
+	key = key_ref_to_ptr(key_ref);
+
+	/* make the changes with the locks held to prevent chown/chmod races */
+	ret = -EACCES;
+	down_write(&key->sem);
+
+	/* if we're not the sysadmin, we can only change a key that we own */
+	if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
+		key->perm = perm;
+		ret = 0;
+	}
+
+	up_write(&key->sem);
+	key_put(key);
+error:
+	return ret;
+}
+
+/*
+ * Get the destination keyring for instantiation and check that the caller has
+ * Write permission on it.
+ */
+static long get_instantiation_keyring(key_serial_t ringid,
+				      struct request_key_auth *rka,
+				      struct key **_dest_keyring)
+{
+	key_ref_t dkref;
+
+	*_dest_keyring = NULL;
+
+	/* just return a NULL pointer if we weren't asked to make a link */
+	if (ringid == 0)
+		return 0;
+
+	/* if a specific keyring is nominated by ID, then use that */
+	if (ringid > 0) {
+		dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+		if (IS_ERR(dkref))
+			return PTR_ERR(dkref);
+		*_dest_keyring = key_ref_to_ptr(dkref);
+		return 0;
+	}
+
+	if (ringid == KEY_SPEC_REQKEY_AUTH_KEY)
+		return -EINVAL;
+
+	/* otherwise specify the destination keyring recorded in the
+	 * authorisation key (any KEY_SPEC_*_KEYRING) */
+	if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) {
+		*_dest_keyring = key_get(rka->dest_keyring);
+		return 0;
+	}
+
+	return -ENOKEY;
+}
+
+/*
+ * Change the request_key authorisation key on the current process.
+ */
+static int keyctl_change_reqkey_auth(struct key *key)
+{
+	struct cred *new;
+
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	key_put(new->request_key_auth);
+	new->request_key_auth = key_get(key);
+
+	return commit_creds(new);
+}
+
+/*
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key_common(key_serial_t id,
+				   struct iov_iter *from,
+				   key_serial_t ringid)
+{
+	const struct cred *cred = current_cred();
+	struct request_key_auth *rka;
+	struct key *instkey, *dest_keyring;
+	size_t plen = from ? iov_iter_count(from) : 0;
+	void *payload;
+	long ret;
+
+	kenter("%d,,%zu,%d", id, plen, ringid);
+
+	if (!plen)
+		from = NULL;
+
+	ret = -EINVAL;
+	if (plen > 1024 * 1024 - 1)
+		goto error;
+
+	/* the appropriate instantiation authorisation key must have been
+	 * assumed before calling this */
+	ret = -EPERM;
+	instkey = cred->request_key_auth;
+	if (!instkey)
+		goto error;
+
+	rka = instkey->payload.data[0];
+	if (rka->target_key->serial != id)
+		goto error;
+
+	/* pull the payload in if one was supplied */
+	payload = NULL;
+
+	if (from) {
+		ret = -ENOMEM;
+		payload = kmalloc(plen, GFP_KERNEL);
+		if (!payload) {
+			if (plen <= PAGE_SIZE)
+				goto error;
+			payload = vmalloc(plen);
+			if (!payload)
+				goto error;
+		}
+
+		ret = -EFAULT;
+		if (copy_from_iter(payload, plen, from) != plen)
+			goto error2;
+	}
+
+	/* find the destination keyring amongst those belonging to the
+	 * requesting task */
+	ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
+	if (ret < 0)
+		goto error2;
+
+	/* instantiate the key and link it into a keyring */
+	ret = key_instantiate_and_link(rka->target_key, payload, plen,
+				       dest_keyring, instkey);
+
+	key_put(dest_keyring);
+
+	/* discard the assumed authority if it's just been disabled by
+	 * instantiation of the key */
+	if (ret == 0)
+		keyctl_change_reqkey_auth(NULL);
+
+error2:
+	kvfree(payload);
+error:
+	return ret;
+}
+
+/*
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key(key_serial_t id,
+			    const void __user *_payload,
+			    size_t plen,
+			    key_serial_t ringid)
+{
+	if (_payload && plen) {
+		struct iovec iov;
+		struct iov_iter from;
+		int ret;
+
+		ret = import_single_range(WRITE, (void __user *)_payload, plen,
+					  &iov, &from);
+		if (unlikely(ret))
+			return ret;
+
+		return keyctl_instantiate_key_common(id, &from, ringid);
+	}
+
+	return keyctl_instantiate_key_common(id, NULL, ringid);
+}
+
+/*
+ * Instantiate a key with the specified multipart payload and link the key into
+ * the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key_iov(key_serial_t id,
+				const struct iovec __user *_payload_iov,
+				unsigned ioc,
+				key_serial_t ringid)
+{
+	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+	struct iov_iter from;
+	long ret;
+
+	if (!_payload_iov)
+		ioc = 0;
+
+	ret = import_iovec(WRITE, _payload_iov, ioc,
+				    ARRAY_SIZE(iovstack), &iov, &from);
+	if (ret < 0)
+		return ret;
+	ret = keyctl_instantiate_key_common(id, &from, ringid);
+	kfree(iov);
+	return ret;
+}
+
+/*
+ * Negatively instantiate the key with the given timeout (in seconds) and link
+ * the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
+{
+	return keyctl_reject_key(id, timeout, ENOKEY, ringid);
+}
+
+/*
+ * Negatively instantiate the key with the given timeout (in seconds) and error
+ * code and link the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return the specified error code until the negative key expires.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
+		       key_serial_t ringid)
+{
+	const struct cred *cred = current_cred();
+	struct request_key_auth *rka;
+	struct key *instkey, *dest_keyring;
+	long ret;
+
+	kenter("%d,%u,%u,%d", id, timeout, error, ringid);
+
+	/* must be a valid error code and mustn't be a kernel special */
+	if (error <= 0 ||
+	    error >= MAX_ERRNO ||
+	    error == ERESTARTSYS ||
+	    error == ERESTARTNOINTR ||
+	    error == ERESTARTNOHAND ||
+	    error == ERESTART_RESTARTBLOCK)
+		return -EINVAL;
+
+	/* the appropriate instantiation authorisation key must have been
+	 * assumed before calling this */
+	ret = -EPERM;
+	instkey = cred->request_key_auth;
+	if (!instkey)
+		goto error;
+
+	rka = instkey->payload.data[0];
+	if (rka->target_key->serial != id)
+		goto error;
+
+	/* find the destination keyring if present (which must also be
+	 * writable) */
+	ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
+	if (ret < 0)
+		goto error;
+
+	/* instantiate the key and link it into a keyring */
+	ret = key_reject_and_link(rka->target_key, timeout, error,
+				  dest_keyring, instkey);
+
+	key_put(dest_keyring);
+
+	/* discard the assumed authority if it's just been disabled by
+	 * instantiation of the key */
+	if (ret == 0)
+		keyctl_change_reqkey_auth(NULL);
+
+error:
+	return ret;
+}
+
+/*
+ * Read or set the default keyring in which request_key() will cache keys and
+ * return the old setting.
+ *
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist.  The old setting will be returned if successful.
+ */
+long keyctl_set_reqkey_keyring(int reqkey_defl)
+{
+	struct cred *new;
+	int ret, old_setting;
+
+	old_setting = current_cred_xxx(jit_keyring);
+
+	if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE)
+		return old_setting;
+
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	switch (reqkey_defl) {
+	case KEY_REQKEY_DEFL_THREAD_KEYRING:
+		ret = install_thread_keyring_to_cred(new);
+		if (ret < 0)
+			goto error;
+		goto set;
+
+	case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+		ret = install_process_keyring_to_cred(new);
+		if (ret < 0)
+			goto error;
+		goto set;
+
+	case KEY_REQKEY_DEFL_DEFAULT:
+	case KEY_REQKEY_DEFL_SESSION_KEYRING:
+	case KEY_REQKEY_DEFL_USER_KEYRING:
+	case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+	case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
+		goto set;
+
+	case KEY_REQKEY_DEFL_NO_CHANGE:
+	case KEY_REQKEY_DEFL_GROUP_KEYRING:
+	default:
+		ret = -EINVAL;
+		goto error;
+	}
+
+set:
+	new->jit_keyring = reqkey_defl;
+	commit_creds(new);
+	return old_setting;
+error:
+	abort_creds(new);
+	return ret;
+}
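+
+/* Example (illustrative sketch, assuming the keyutils wrapper):
+ *
+ *	old = keyctl_set_reqkey_keyring(KEY_REQKEY_DEFL_THREAD_KEYRING);
+ *
+ * makes request_key() cache new keys in the calling thread's keyring and
+ * returns the previous setting.
+ */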
+
+/*
+ * Set or clear the timeout on a key.
+ *
+ * Either the key must grant the caller Setattr permission or else the caller
+ * must hold an instantiation authorisation token for the key.
+ *
+ * The timeout is either 0 to clear the timeout, or a number of seconds from
+ * the current time.  The key and any links to the key will be automatically
+ * garbage collected after the timeout expires.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_set_timeout(key_serial_t id, unsigned timeout)
+{
+	struct key *key, *instkey;
+	key_ref_t key_ref;
+	long ret;
+
+	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+				  KEY_NEED_SETATTR);
+	if (IS_ERR(key_ref)) {
+		/* setting the timeout on a key under construction is permitted
+		 * if we have the authorisation token handy */
+		if (PTR_ERR(key_ref) == -EACCES) {
+			instkey = key_get_instantiation_authkey(id);
+			if (!IS_ERR(instkey)) {
+				key_put(instkey);
+				key_ref = lookup_user_key(id,
+							  KEY_LOOKUP_PARTIAL,
+							  0);
+				if (!IS_ERR(key_ref))
+					goto okay;
+			}
+		}
+
+		ret = PTR_ERR(key_ref);
+		goto error;
+	}
+
+okay:
+	key = key_ref_to_ptr(key_ref);
+	key_set_timeout(key, timeout);
+	key_put(key);
+
+	ret = 0;
+error:
+	return ret;
+}
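+
+/* Example (illustrative sketch, assuming the keyutils wrapper):
+ *
+ *	keyctl_set_timeout(key_id, 300);
+ *
+ * expires the key after five minutes; a timeout of 0 clears it again.
+ */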
+
+/*
+ * Assume (or clear) the authority to instantiate the specified key.
+ *
+ * This sets the authoritative token currently in force for key instantiation.
+ * This must be done for a key to be instantiated.  It has the effect of making
+ * available all the keys from the caller of the request_key() that created a
+ * key to request_key() calls made by the caller of this function.
+ *
+ * The caller must have the instantiation authorisation key in their process
+ * keyrings, with Search permission granted to the caller.
+ *
+ * If the ID given is 0, then the setting will be cleared and 0 returned.
+ *
+ * If the ID given matches an authorisation key, then that key will be
+ * set and its ID will be returned.  The authorisation key can be read to get
+ * the callout information passed to request_key().
+ */
+long keyctl_assume_authority(key_serial_t id)
+{
+	struct key *authkey;
+	long ret;
+
+	/* special key IDs aren't permitted */
+	ret = -EINVAL;
+	if (id < 0)
+		goto error;
+
+	/* we divest ourselves of authority if given an ID of 0 */
+	if (id == 0) {
+		ret = keyctl_change_reqkey_auth(NULL);
+		goto error;
+	}
+
+	/* attempt to assume the authority temporarily granted to us whilst we
+	 * instantiate the specified key
+	 * - the authorisation key must be in the current task's keyrings
+	 *   somewhere
+	 */
+	authkey = key_get_instantiation_authkey(id);
+	if (IS_ERR(authkey)) {
+		ret = PTR_ERR(authkey);
+		goto error;
+	}
+
+	ret = keyctl_change_reqkey_auth(authkey);
+	if (ret < 0)
+		goto error;
+	key_put(authkey);
+
+	ret = authkey->serial;
+error:
+	return ret;
+}
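+
+/* Example (illustrative sketch of a request-key helper, assuming the
+ * keyutils wrappers):
+ *
+ *	keyctl_assume_authority(key_id);
+ *	keyctl_read_alloc(KEY_SPEC_REQKEY_AUTH_KEY, &callout_info);
+ *
+ * assumes the authority for key_id and then reads back the callout
+ * information that was passed to request_key().
+ */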
+
+/*
+ * Get the LSM security label attached to a key.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, then up to buflen bytes of data will be placed into it.
+ *
+ * If successful, the amount of information available will be returned,
+ * irrespective of how much was copied (including the terminal NUL).
+ */
+long keyctl_get_security(key_serial_t keyid,
+			 char __user *buffer,
+			 size_t buflen)
+{
+	struct key *key, *instkey;
+	key_ref_t key_ref;
+	char *context;
+	long ret;
+
+	key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
+	if (IS_ERR(key_ref)) {
+		if (PTR_ERR(key_ref) != -EACCES)
+			return PTR_ERR(key_ref);
+
+		/* viewing a key under construction is also permitted if we
+		 * have the authorisation token handy */
+		instkey = key_get_instantiation_authkey(keyid);
+		if (IS_ERR(instkey))
+			return PTR_ERR(instkey);
+		key_put(instkey);
+
+		key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0);
+		if (IS_ERR(key_ref))
+			return PTR_ERR(key_ref);
+	}
+
+	key = key_ref_to_ptr(key_ref);
+	ret = security_key_getsecurity(key, &context);
+	if (ret == 0) {
+		/* if no information was returned, give userspace an empty
+		 * string */
+		ret = 1;
+		if (buffer && buflen > 0 &&
+		    copy_to_user(buffer, "", 1) != 0)
+			ret = -EFAULT;
+	} else if (ret > 0) {
+		/* return as much data as there's room for */
+		if (buffer && buflen > 0) {
+			if (buflen > ret)
+				buflen = ret;
+
+			if (copy_to_user(buffer, context, buflen) != 0)
+				ret = -EFAULT;
+		}
+
+		kfree(context);
+	}
+
+	key_ref_put(key_ref);
+	return ret;
+}
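+
+/* Example (illustrative sketch, assuming the keyutils wrapper):
+ *
+ *	char *context;
+ *	if (keyctl_get_security_alloc(key_id, &context) >= 0) {
+ *		printf("%s\n", context);
+ *		free(context);
+ *	}
+ */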
+
+/*
+ * Attempt to install the calling process's session keyring on the process's
+ * parent process.
+ *
+ * The keyring must exist and must grant the caller LINK permission, and the
+ * parent process must be single-threaded and must have the same effective
+ * ownership as this process and mustn't be SUID/SGID.
+ *
+ * The keyring will be emplaced on the parent when it next resumes userspace.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_session_to_parent(void)
+{
+	struct task_struct *me, *parent;
+	const struct cred *mycred, *pcred;
+	struct callback_head *newwork, *oldwork;
+	key_ref_t keyring_r;
+	struct cred *cred;
+	int ret;
+
+	keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
+	if (IS_ERR(keyring_r))
+		return PTR_ERR(keyring_r);
+
+	ret = -ENOMEM;
+
+	/* our parent is going to need a new cred struct, a new tgcred struct
+	 * and new security data, so we allocate them here to prevent ENOMEM in
+	 * our parent */
+	cred = cred_alloc_blank();
+	if (!cred)
+		goto error_keyring;
+	newwork = &cred->rcu;
+
+	cred->session_keyring = key_ref_to_ptr(keyring_r);
+	keyring_r = NULL;
+	init_task_work(newwork, key_change_session_keyring);
+
+	me = current;
+	rcu_read_lock();
+	write_lock_irq(&tasklist_lock);
+
+	ret = -EPERM;
+	oldwork = NULL;
+	parent = me->real_parent;
+
+	/* the parent mustn't be init and mustn't be a kernel thread */
+	if (parent->pid <= 1 || !parent->mm)
+		goto unlock;
+
+	/* the parent must be single threaded */
+	if (!thread_group_empty(parent))
+		goto unlock;
+
+	/* the parent and the child must have different session keyrings or
+	 * there's no point */
+	mycred = current_cred();
+	pcred = __task_cred(parent);
+	if (mycred == pcred ||
+	    mycred->session_keyring == pcred->session_keyring) {
+		ret = 0;
+		goto unlock;
+	}
+
+	/* the parent must have the same effective ownership and mustn't be
+	 * SUID/SGID */
+	if (!uid_eq(pcred->uid,	 mycred->euid) ||
+	    !uid_eq(pcred->euid, mycred->euid) ||
+	    !uid_eq(pcred->suid, mycred->euid) ||
+	    !gid_eq(pcred->gid,	 mycred->egid) ||
+	    !gid_eq(pcred->egid, mycred->egid) ||
+	    !gid_eq(pcred->sgid, mycred->egid))
+		goto unlock;
+
+	/* the keyrings must have the same UID */
+	if ((pcred->session_keyring &&
+	     !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
+	    !uid_eq(mycred->session_keyring->uid, mycred->euid))
+		goto unlock;
+
+	/* cancel an already pending keyring replacement */
+	oldwork = task_work_cancel(parent, key_change_session_keyring);
+
+	/* the replacement session keyring is applied just prior to userspace
+	 * restarting */
+	ret = task_work_add(parent, newwork, true);
+	if (!ret)
+		newwork = NULL;
+unlock:
+	write_unlock_irq(&tasklist_lock);
+	rcu_read_unlock();
+	if (oldwork)
+		put_cred(container_of(oldwork, struct cred, rcu));
+	if (newwork)
+		put_cred(cred);
+	return ret;
+
+error_keyring:
+	key_ref_put(keyring_r);
+	return ret;
+}
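+
+/* Example (illustrative sketch, assuming the keyutils wrappers): this is
+ * the pattern used to give a new session keyring to the invoking shell:
+ *
+ *	keyctl_join_session_keyring(NULL);
+ *	keyctl_session_to_parent();
+ */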
+
+/*
+ * The key control system call
+ */
+SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
+		unsigned long, arg4, unsigned long, arg5)
+{
+	switch (option) {
+	case KEYCTL_GET_KEYRING_ID:
+		return keyctl_get_keyring_ID((key_serial_t) arg2,
+					     (int) arg3);
+
+	case KEYCTL_JOIN_SESSION_KEYRING:
+		return keyctl_join_session_keyring((const char __user *) arg2);
+
+	case KEYCTL_UPDATE:
+		return keyctl_update_key((key_serial_t) arg2,
+					 (const void __user *) arg3,
+					 (size_t) arg4);
+
+	case KEYCTL_REVOKE:
+		return keyctl_revoke_key((key_serial_t) arg2);
+
+	case KEYCTL_DESCRIBE:
+		return keyctl_describe_key((key_serial_t) arg2,
+					   (char __user *) arg3,
+					   (unsigned) arg4);
+
+	case KEYCTL_CLEAR:
+		return keyctl_keyring_clear((key_serial_t) arg2);
+
+	case KEYCTL_LINK:
+		return keyctl_keyring_link((key_serial_t) arg2,
+					   (key_serial_t) arg3);
+
+	case KEYCTL_UNLINK:
+		return keyctl_keyring_unlink((key_serial_t) arg2,
+					     (key_serial_t) arg3);
+
+	case KEYCTL_SEARCH:
+		return keyctl_keyring_search((key_serial_t) arg2,
+					     (const char __user *) arg3,
+					     (const char __user *) arg4,
+					     (key_serial_t) arg5);
+
+	case KEYCTL_READ:
+		return keyctl_read_key((key_serial_t) arg2,
+				       (char __user *) arg3,
+				       (size_t) arg4);
+
+	case KEYCTL_CHOWN:
+		return keyctl_chown_key((key_serial_t) arg2,
+					(uid_t) arg3,
+					(gid_t) arg4);
+
+	case KEYCTL_SETPERM:
+		return keyctl_setperm_key((key_serial_t) arg2,
+					  (key_perm_t) arg3);
+
+	case KEYCTL_INSTANTIATE:
+		return keyctl_instantiate_key((key_serial_t) arg2,
+					      (const void __user *) arg3,
+					      (size_t) arg4,
+					      (key_serial_t) arg5);
+
+	case KEYCTL_NEGATE:
+		return keyctl_negate_key((key_serial_t) arg2,
+					 (unsigned) arg3,
+					 (key_serial_t) arg4);
+
+	case KEYCTL_SET_REQKEY_KEYRING:
+		return keyctl_set_reqkey_keyring(arg2);
+
+	case KEYCTL_SET_TIMEOUT:
+		return keyctl_set_timeout((key_serial_t) arg2,
+					  (unsigned) arg3);
+
+	case KEYCTL_ASSUME_AUTHORITY:
+		return keyctl_assume_authority((key_serial_t) arg2);
+
+	case KEYCTL_GET_SECURITY:
+		return keyctl_get_security((key_serial_t) arg2,
+					   (char __user *) arg3,
+					   (size_t) arg4);
+
+	case KEYCTL_SESSION_TO_PARENT:
+		return keyctl_session_to_parent();
+
+	case KEYCTL_REJECT:
+		return keyctl_reject_key((key_serial_t) arg2,
+					 (unsigned) arg3,
+					 (unsigned) arg4,
+					 (key_serial_t) arg5);
+
+	case KEYCTL_INSTANTIATE_IOV:
+		return keyctl_instantiate_key_iov(
+			(key_serial_t) arg2,
+			(const struct iovec __user *) arg3,
+			(unsigned) arg4,
+			(key_serial_t) arg5);
+
+	case KEYCTL_INVALIDATE:
+		return keyctl_invalidate_key((key_serial_t) arg2);
+
+	case KEYCTL_GET_PERSISTENT:
+		return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
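+
+/* Note: userspace normally reaches this dispatcher through the keyutils
+ * library's variadic wrapper (an assumption about the usual userspace
+ * setup, not something defined here), e.g.:
+ *
+ *	keyctl(KEYCTL_REVOKE, key_id);
+ *
+ * which boils down to syscall(__NR_keyctl, KEYCTL_REVOKE, key_id, 0, 0, 0).
+ */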
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
new file mode 100644
index 0000000..d5264f9
--- /dev/null
+++ b/security/keys/keyring.c
@@ -0,0 +1,1396 @@
+/* Keyring handling
+ *
+ * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <keys/keyring-type.h>
+#include <keys/user-type.h>
+#include <linux/assoc_array_priv.h>
+#include <linux/uaccess.h>
+#include "internal.h"
+
+/*
+ * When plumbing the depths of the key tree, this sets a hard limit on how
+ * deep we're willing to go.
+ */
+#define KEYRING_SEARCH_MAX_DEPTH 6
+
+/*
+ * We keep all named keyrings in a hash table to speed up looking them up.
+ */
+#define KEYRING_NAME_HASH_SIZE	(1 << 5)
+
+/*
+ * We mark pointers we pass to the associative array with bit 1 set if
+ * they're keyrings and clear otherwise.
+ */
+#define KEYRING_PTR_SUBTYPE	0x2UL
+
+static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
+{
+	return (unsigned long)x & KEYRING_PTR_SUBTYPE;
+}
+static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
+{
+	void *object = assoc_array_ptr_to_leaf(x);
+	return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
+}
+static inline void *keyring_key_to_ptr(struct key *key)
+{
+	if (key->type == &key_type_keyring)
+		return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
+	return key;
+}
+
+static struct list_head	keyring_name_hash[KEYRING_NAME_HASH_SIZE];
+static DEFINE_RWLOCK(keyring_name_lock);
+
+static inline unsigned keyring_hash(const char *desc)
+{
+	unsigned bucket = 0;
+
+	for (; *desc; desc++)
+		bucket += (unsigned char)*desc;
+
+	return bucket & (KEYRING_NAME_HASH_SIZE - 1);
+}
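+
+/* Worked example (illustrative): for the description "_uid.0" the byte sum
+ * is 95 + 117 + 105 + 100 + 46 + 48 = 511, so that keyring hashes to bucket
+ * 511 & 31 = 31.
+ */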
+
+/*
+ * The keyring key type definition.  Keyrings are simply keys of this type and
+ * can be treated as ordinary keys in addition to having their own special
+ * operations.
+ */
+static int keyring_preparse(struct key_preparsed_payload *prep);
+static void keyring_free_preparse(struct key_preparsed_payload *prep);
+static int keyring_instantiate(struct key *keyring,
+			       struct key_preparsed_payload *prep);
+static void keyring_revoke(struct key *keyring);
+static void keyring_destroy(struct key *keyring);
+static void keyring_describe(const struct key *keyring, struct seq_file *m);
+static long keyring_read(const struct key *keyring,
+			 char __user *buffer, size_t buflen);
+
+struct key_type key_type_keyring = {
+	.name		= "keyring",
+	.def_datalen	= 0,
+	.preparse	= keyring_preparse,
+	.free_preparse	= keyring_free_preparse,
+	.instantiate	= keyring_instantiate,
+	.revoke		= keyring_revoke,
+	.destroy	= keyring_destroy,
+	.describe	= keyring_describe,
+	.read		= keyring_read,
+};
+EXPORT_SYMBOL(key_type_keyring);
+
+/*
+ * Semaphore to serialise link/link calls to prevent two link calls in parallel
+ * introducing a cycle.
+ */
+static DECLARE_RWSEM(keyring_serialise_link_sem);
+
+/*
+ * Publish the name of a keyring so that it can be found by name (if it has
+ * one).
+ */
+static void keyring_publish_name(struct key *keyring)
+{
+	int bucket;
+
+	if (keyring->description) {
+		bucket = keyring_hash(keyring->description);
+
+		write_lock(&keyring_name_lock);
+
+		if (!keyring_name_hash[bucket].next)
+			INIT_LIST_HEAD(&keyring_name_hash[bucket]);
+
+		list_add_tail(&keyring->name_link,
+			      &keyring_name_hash[bucket]);
+
+		write_unlock(&keyring_name_lock);
+	}
+}
+
+/*
+ * Preparse a keyring payload
+ */
+static int keyring_preparse(struct key_preparsed_payload *prep)
+{
+	return prep->datalen != 0 ? -EINVAL : 0;
+}
+
+/*
+ * Free a preparse of a user defined key payload
+ */
+static void keyring_free_preparse(struct key_preparsed_payload *prep)
+{
+}
+
+/*
+ * Initialise a keyring.
+ *
+ * Returns 0 on success, -EINVAL if given any data.
+ */
+static int keyring_instantiate(struct key *keyring,
+			       struct key_preparsed_payload *prep)
+{
+	assoc_array_init(&keyring->keys);
+	/* make the keyring available by name if it has one */
+	keyring_publish_name(keyring);
+	return 0;
+}
+
+/*
+ * Multiply a 64-bit value by a 32-bit value to get a 96-bit product and fold
+ * it back to 64 bits.  Ideally we'd fold the carry back too, but that
+ * requires inline asm.
+ */
+static u64 mult_64x32_and_fold(u64 x, u32 y)
+{
+	u64 hi = (u64)(u32)(x >> 32) * y;
+	u64 lo = (u64)(u32)(x) * y;
+	return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
+}
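+
+/* Worked example (illustrative): with x = 1ULL << 63 and y = 4 the true
+ * product is 2^65; here hi = 0x80000000ULL * 4 = 0x200000000, lo = 0, and
+ * the overflowing bits 64-95 are folded back in at bit 0:
+ * lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32) = 0 + 0 + 2 = 2.
+ */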
+
+/*
+ * Hash a key type and description.
+ */
+static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
+{
+	const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
+	const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
+	const char *description = index_key->description;
+	unsigned long hash, type;
+	u32 piece;
+	u64 acc;
+	int n, desc_len = index_key->desc_len;
+
+	type = (unsigned long)index_key->type;
+
+	acc = mult_64x32_and_fold(type, desc_len + 13);
+	acc = mult_64x32_and_fold(acc, 9207);
+	for (;;) {
+		n = desc_len;
+		if (n <= 0)
+			break;
+		if (n > 4)
+			n = 4;
+		piece = 0;
+		memcpy(&piece, description, n);
+		description += n;
+		desc_len -= n;
+		acc = mult_64x32_and_fold(acc, piece);
+		acc = mult_64x32_and_fold(acc, 9207);
+	}
+
+	/* Fold the hash down to 32 bits if need be. */
+	hash = acc;
+	if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
+		hash ^= acc >> 32;
+
+	/* Squidge all the keyrings into a separate part of the tree from
+	 * ordinary keys by making sure the lowest level segment in the hash is
+	 * zero for keyrings and non-zero otherwise.
+	 */
+	if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
+		return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+	if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+		return (hash + (hash << level_shift)) & ~fan_mask;
+	return hash;
+}
+
+/*
+ * Build the next index key chunk.
+ *
+ * On 32-bit systems the index key is laid out as:
+ *
+ *	0	4	5	9...
+ *	hash	desclen	typeptr	desc[]
+ *
+ * On 64-bit systems:
+ *
+ *	0	8	9	17...
+ *	hash	desclen	typeptr	desc[]
+ *
+ * We return it one word-sized chunk at a time.
+ */
+static unsigned long keyring_get_key_chunk(const void *data, int level)
+{
+	const struct keyring_index_key *index_key = data;
+	unsigned long chunk = 0;
+	long offset = 0;
+	int desc_len = index_key->desc_len, n = sizeof(chunk);
+
+	level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
+	switch (level) {
+	case 0:
+		return hash_key_type_and_desc(index_key);
+	case 1:
+		return ((unsigned long)index_key->type << 8) | desc_len;
+	case 2:
+		if (desc_len == 0)
+			return (u8)((unsigned long)index_key->type >>
+				    (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+		n--;
+		offset = 1;
+	default:
+		offset += sizeof(chunk) - 1;
+		offset += (level - 3) * sizeof(chunk);
+		if (offset >= desc_len)
+			return 0;
+		desc_len -= offset;
+		if (desc_len > n)
+			desc_len = n;
+		offset += desc_len;
+		do {
+			chunk <<= 8;
+			chunk |= ((u8*)index_key->description)[--offset];
+		} while (--desc_len > 0);
+
+		if (level == 2) {
+			chunk <<= 8;
+			chunk |= (u8)((unsigned long)index_key->type >>
+				      (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+		}
+		return chunk;
+	}
+}
+
+static unsigned long keyring_get_object_key_chunk(const void *object, int level)
+{
+	const struct key *key = keyring_ptr_to_key(object);
+	return keyring_get_key_chunk(&key->index_key, level);
+}
+
+static bool keyring_compare_object(const void *object, const void *data)
+{
+	const struct keyring_index_key *index_key = data;
+	const struct key *key = keyring_ptr_to_key(object);
+
+	return key->index_key.type == index_key->type &&
+		key->index_key.desc_len == index_key->desc_len &&
+		memcmp(key->index_key.description, index_key->description,
+		       index_key->desc_len) == 0;
+}
+
+/*
+ * Compare the index keys of a pair of objects and determine the bit position
+ * at which they differ - if they differ.
+ */
+static int keyring_diff_objects(const void *object, const void *data)
+{
+	const struct key *key_a = keyring_ptr_to_key(object);
+	const struct keyring_index_key *a = &key_a->index_key;
+	const struct keyring_index_key *b = data;
+	unsigned long seg_a, seg_b;
+	int level, i;
+
+	level = 0;
+	seg_a = hash_key_type_and_desc(a);
+	seg_b = hash_key_type_and_desc(b);
+	if ((seg_a ^ seg_b) != 0)
+		goto differ;
+
+	/* The number of bits contributed by the hash is controlled by a
+	 * constant in the assoc_array headers.  Everything else thereafter we
+	 * can deal with as being machine word-size dependent.
+	 */
+	level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
+	seg_a = a->desc_len;
+	seg_b = b->desc_len;
+	if ((seg_a ^ seg_b) != 0)
+		goto differ;
+
+	/* The next bit may not work on big endian */
+	level++;
+	seg_a = (unsigned long)a->type;
+	seg_b = (unsigned long)b->type;
+	if ((seg_a ^ seg_b) != 0)
+		goto differ;
+
+	level += sizeof(unsigned long);
+	if (a->desc_len == 0)
+		goto same;
+
+	i = 0;
+	if (((unsigned long)a->description | (unsigned long)b->description) &
+	    (sizeof(unsigned long) - 1)) {
+		do {
+			seg_a = *(unsigned long *)(a->description + i);
+			seg_b = *(unsigned long *)(b->description + i);
+			if ((seg_a ^ seg_b) != 0)
+				goto differ_plus_i;
+			i += sizeof(unsigned long);
+		} while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
+	}
+
+	for (; i < a->desc_len; i++) {
+		seg_a = *(unsigned char *)(a->description + i);
+		seg_b = *(unsigned char *)(b->description + i);
+		if ((seg_a ^ seg_b) != 0)
+			goto differ_plus_i;
+	}
+
+same:
+	return -1;
+
+differ_plus_i:
+	level += i;
+differ:
+	i = level * 8 + __ffs(seg_a ^ seg_b);
+	return i;
+}
+
+/*
+ * Free an object after stripping the keyring flag off of the pointer.
+ */
+static void keyring_free_object(void *object)
+{
+	key_put(keyring_ptr_to_key(object));
+}
+
+/*
+ * Operations for keyring management by the index-tree routines.
+ */
+static const struct assoc_array_ops keyring_assoc_array_ops = {
+	.get_key_chunk		= keyring_get_key_chunk,
+	.get_object_key_chunk	= keyring_get_object_key_chunk,
+	.compare_object		= keyring_compare_object,
+	.diff_objects		= keyring_diff_objects,
+	.free_object		= keyring_free_object,
+};
+
+/*
+ * Clean up a keyring when it is destroyed.  Unpublish its name if it had one
+ * and dispose of its data.
+ *
+ * The garbage collector detects the final key_put(), removes the keyring from
+ * the serial number tree and then does RCU synchronisation before coming here,
+ * so we shouldn't need to worry about code poking around here with the RCU
+ * readlock held by this time.
+ */
+static void keyring_destroy(struct key *keyring)
+{
+	if (keyring->description) {
+		write_lock(&keyring_name_lock);
+
+		if (keyring->name_link.next != NULL &&
+		    !list_empty(&keyring->name_link))
+			list_del(&keyring->name_link);
+
+		write_unlock(&keyring_name_lock);
+	}
+
+	assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
+}
+
+/*
+ * Describe a keyring for /proc.
+ */
+static void keyring_describe(const struct key *keyring, struct seq_file *m)
+{
+	if (keyring->description)
+		seq_puts(m, keyring->description);
+	else
+		seq_puts(m, "[anon]");
+
+	if (key_is_positive(keyring)) {
+		if (keyring->keys.nr_leaves_on_tree != 0)
+			seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
+		else
+			seq_puts(m, ": empty");
+	}
+}
+
+struct keyring_read_iterator_context {
+	size_t			buflen;
+	size_t			count;
+	key_serial_t __user	*buffer;
+};
+
+static int keyring_read_iterator(const void *object, void *data)
+{
+	struct keyring_read_iterator_context *ctx = data;
+	const struct key *key = keyring_ptr_to_key(object);
+	int ret;
+
+	kenter("{%s,%d},,{%zu/%zu}",
+	       key->type->name, key->serial, ctx->count, ctx->buflen);
+
+	if (ctx->count >= ctx->buflen)
+		return 1;
+
+	ret = put_user(key->serial, ctx->buffer);
+	if (ret < 0)
+		return ret;
+	ctx->buffer++;
+	ctx->count += sizeof(key->serial);
+	return 0;
+}
+
+/*
+ * Read a list of key IDs from the keyring's contents in binary form
+ *
+ * The keyring's semaphore is read-locked by the caller.  This prevents someone
+ * from modifying it under us - which could cause us to read key IDs multiple
+ * times.
+ */
+static long keyring_read(const struct key *keyring,
+			 char __user *buffer, size_t buflen)
+{
+	struct keyring_read_iterator_context ctx;
+	long ret;
+
+	kenter("{%d},,%zu", key_serial(keyring), buflen);
+
+	if (buflen & (sizeof(key_serial_t) - 1))
+		return -EINVAL;
+
+	/* Copy as many key IDs as fit into the buffer */
+	if (buffer && buflen) {
+		ctx.buffer = (key_serial_t __user *)buffer;
+		ctx.buflen = buflen;
+		ctx.count = 0;
+		ret = assoc_array_iterate(&keyring->keys,
+					  keyring_read_iterator, &ctx);
+		if (ret < 0) {
+			kleave(" = %ld [iterate]", ret);
+			return ret;
+		}
+	}
+
+	/* Return the size of the buffer needed */
+	ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
+	if (ret <= buflen)
+		kleave("= %ld [ok]", ret);
+	else
+		kleave("= %ld [buffer too small]", ret);
+	return ret;
+}
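+
+/* Example (illustrative sketch, assuming the keyutils wrapper): from
+ * userspace the payload reads back as an array of key serial numbers:
+ *
+ *	key_serial_t *ids;
+ *	long n = keyctl_read_alloc(keyring_id, (void **)&ids);
+ *	for (i = 0; n >= 0 && i < n / sizeof(*ids); i++)
+ *		printf("%d\n", ids[i]);
+ */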
+
+/*
+ * Allocate a keyring and link it into the destination keyring if one is given.
+ */
+struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
+			  const struct cred *cred, key_perm_t perm,
+			  unsigned long flags, struct key *dest)
+{
+	struct key *keyring;
+	int ret;
+
+	keyring = key_alloc(&key_type_keyring, description,
+			    uid, gid, cred, perm, flags);
+	if (!IS_ERR(keyring)) {
+		ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
+		if (ret < 0) {
+			key_put(keyring);
+			keyring = ERR_PTR(ret);
+		}
+	}
+
+	return keyring;
+}
+EXPORT_SYMBOL(keyring_alloc);
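+
+/* Example: key_create_persistent_register() in persistent.c (added later in
+ * this change) allocates the per-namespace register keyring this way:
+ *
+ *	reg = keyring_alloc(".persistent_register",
+ *			    KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+ *			    ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ *			     KEY_USR_VIEW | KEY_USR_READ),
+ *			    KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ */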
+
+/*
+ * By default, keys are found by getting an exact match on their descriptions.
+ */
+bool key_default_cmp(const struct key *key,
+		     const struct key_match_data *match_data)
+{
+	return strcmp(key->description, match_data->raw_data) == 0;
+}
+
+/*
+ * Iteration function to consider each key found.
+ */
+static int keyring_search_iterator(const void *object, void *iterator_data)
+{
+	struct keyring_search_context *ctx = iterator_data;
+	const struct key *key = keyring_ptr_to_key(object);
+	unsigned long kflags = READ_ONCE(key->flags);
+	short state = READ_ONCE(key->state);
+
+	kenter("{%d}", key->serial);
+
+	/* ignore keys not of this type */
+	if (key->type != ctx->index_key.type) {
+		kleave(" = 0 [!type]");
+		return 0;
+	}
+
+	/* skip invalidated, revoked and expired keys */
+	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+		if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
+			      (1 << KEY_FLAG_REVOKED))) {
+			ctx->result = ERR_PTR(-EKEYREVOKED);
+			kleave(" = %d [invrev]", ctx->skipped_ret);
+			goto skipped;
+		}
+
+		if (key->expiry && ctx->now.tv_sec >= key->expiry) {
+			if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+				ctx->result = ERR_PTR(-EKEYEXPIRED);
+			kleave(" = %d [expire]", ctx->skipped_ret);
+			goto skipped;
+		}
+	}
+
+	/* keys that don't match */
+	if (!ctx->match_data.cmp(key, &ctx->match_data)) {
+		kleave(" = 0 [!match]");
+		return 0;
+	}
+
+	/* key must have search permissions */
+	if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+	    key_task_permission(make_key_ref(key, ctx->possessed),
+				ctx->cred, KEY_NEED_SEARCH) < 0) {
+		ctx->result = ERR_PTR(-EACCES);
+		kleave(" = %d [!perm]", ctx->skipped_ret);
+		goto skipped;
+	}
+
+	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+		/* we set a different error code if we pass a negative key */
+		if (state < 0) {
+			ctx->result = ERR_PTR(state);
+			kleave(" = %d [neg]", ctx->skipped_ret);
+			goto skipped;
+		}
+	}
+
+	/* Found */
+	ctx->result = make_key_ref(key, ctx->possessed);
+	kleave(" = 1 [found]");
+	return 1;
+
+skipped:
+	return ctx->skipped_ret;
+}
+
+/*
+ * Search inside a keyring for a key.  We can search by walking to it
+ * directly based on its index-key or we can iterate over the entire
+ * tree looking for it, based on the match function.
+ */
+static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
+{
+	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) {
+		const void *object;
+
+		object = assoc_array_find(&keyring->keys,
+					  &keyring_assoc_array_ops,
+					  &ctx->index_key);
+		return object ? ctx->iterator(object, ctx) : 0;
+	}
+	return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
+}
+
+/*
+ * Search a tree of keyrings that point to other keyrings up to the maximum
+ * depth.
+ */
+static bool search_nested_keyrings(struct key *keyring,
+				   struct keyring_search_context *ctx)
+{
+	struct {
+		struct key *keyring;
+		struct assoc_array_node *node;
+		int slot;
+	} stack[KEYRING_SEARCH_MAX_DEPTH];
+
+	struct assoc_array_shortcut *shortcut;
+	struct assoc_array_node *node;
+	struct assoc_array_ptr *ptr;
+	struct key *key;
+	int sp = 0, slot;
+
+	kenter("{%d},{%s,%s}",
+	       keyring->serial,
+	       ctx->index_key.type->name,
+	       ctx->index_key.description);
+
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+	BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+	       (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
+	if (ctx->index_key.description)
+		ctx->index_key.desc_len = strlen(ctx->index_key.description);
+
+	/* Check to see if this top-level keyring is what we are looking for
+	 * and whether it is valid or not.
+	 */
+	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
+	    keyring_compare_object(keyring, &ctx->index_key)) {
+		ctx->skipped_ret = 2;
+		switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
+		case 1:
+			goto found;
+		case 2:
+			return false;
+		default:
+			break;
+		}
+	}
+
+	ctx->skipped_ret = 0;
+
+	/* Start processing a new keyring */
+descend_to_keyring:
+	kdebug("descend to %d", keyring->serial);
+	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+			      (1 << KEY_FLAG_REVOKED)))
+		goto not_this_keyring;
+
+	/* Search through the keys in this keyring before searching its
+	 * subtrees.
+	 */
+	if (search_keyring(keyring, ctx))
+		goto found;
+
+	/* Then manually iterate through the keyrings nested in this one.
+	 *
+	 * Start from the root node of the index tree.  Because of the way the
+	 * hash function has been set up, keyrings cluster on the leftmost
+	 * branch of the root node (root slot 0) or in the root node itself.
+	 * Non-keyrings avoid the leftmost branch of the root entirely (root
+	 * slots 1-15).
+	 */
+	ptr = ACCESS_ONCE(keyring->keys.root);
+	if (!ptr)
+		goto not_this_keyring;
+
+	if (assoc_array_ptr_is_shortcut(ptr)) {
+		/* If the root is a shortcut, either the keyring only contains
+		 * keyring pointers (everything clusters behind root slot 0) or
+		 * doesn't contain any keyring pointers.
+		 */
+		shortcut = assoc_array_ptr_to_shortcut(ptr);
+		smp_read_barrier_depends();
+		if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
+			goto not_this_keyring;
+
+		ptr = ACCESS_ONCE(shortcut->next_node);
+		node = assoc_array_ptr_to_node(ptr);
+		goto begin_node;
+	}
+
+	node = assoc_array_ptr_to_node(ptr);
+	smp_read_barrier_depends();
+
+	ptr = node->slots[0];
+	if (!assoc_array_ptr_is_meta(ptr))
+		goto begin_node;
+
+descend_to_node:
+	/* Descend to a more distal node in this keyring's content tree and go
+	 * through that.
+	 */
+	kdebug("descend");
+	if (assoc_array_ptr_is_shortcut(ptr)) {
+		shortcut = assoc_array_ptr_to_shortcut(ptr);
+		smp_read_barrier_depends();
+		ptr = ACCESS_ONCE(shortcut->next_node);
+		BUG_ON(!assoc_array_ptr_is_node(ptr));
+	}
+	node = assoc_array_ptr_to_node(ptr);
+
+begin_node:
+	kdebug("begin_node");
+	smp_read_barrier_depends();
+	slot = 0;
+ascend_to_node:
+	/* Go through the slots in a node */
+	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+		ptr = ACCESS_ONCE(node->slots[slot]);
+
+		if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
+			goto descend_to_node;
+
+		if (!keyring_ptr_is_keyring(ptr))
+			continue;
+
+		key = keyring_ptr_to_key(ptr);
+
+		if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
+			if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
+				ctx->result = ERR_PTR(-ELOOP);
+				return false;
+			}
+			goto not_this_keyring;
+		}
+
+		/* Search a nested keyring */
+		if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+		    key_task_permission(make_key_ref(key, ctx->possessed),
+					ctx->cred, KEY_NEED_SEARCH) < 0)
+			continue;
+
+		/* stack the current position */
+		stack[sp].keyring = keyring;
+		stack[sp].node = node;
+		stack[sp].slot = slot;
+		sp++;
+
+		/* begin again with the new keyring */
+		keyring = key;
+		goto descend_to_keyring;
+	}
+
+	/* We've dealt with all the slots in the current node, so now we need
+	 * to ascend to the parent and continue processing there.
+	 */
+	ptr = ACCESS_ONCE(node->back_pointer);
+	slot = node->parent_slot;
+
+	if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
+		shortcut = assoc_array_ptr_to_shortcut(ptr);
+		smp_read_barrier_depends();
+		ptr = ACCESS_ONCE(shortcut->back_pointer);
+		slot = shortcut->parent_slot;
+	}
+	if (!ptr)
+		goto not_this_keyring;
+	node = assoc_array_ptr_to_node(ptr);
+	smp_read_barrier_depends();
+	slot++;
+
+	/* If we've ascended to the root (zero backpointer), we must have just
+	 * finished processing the leftmost branch rather than the root slots -
+	 * so there can't be any more keyrings for us to find.
+	 */
+	if (node->back_pointer) {
+		kdebug("ascend %d", slot);
+		goto ascend_to_node;
+	}
+
+	/* The keyring we're looking at was disqualified or didn't contain a
+	 * matching key.
+	 */
+not_this_keyring:
+	kdebug("not_this_keyring %d", sp);
+	if (sp <= 0) {
+		kleave(" = false");
+		return false;
+	}
+
+	/* Resume the processing of a keyring higher up in the tree */
+	sp--;
+	keyring = stack[sp].keyring;
+	node = stack[sp].node;
+	slot = stack[sp].slot + 1;
+	kdebug("ascend to %d [%d]", keyring->serial, slot);
+	goto ascend_to_node;
+
+	/* We found a viable match */
+found:
+	key = key_ref_to_ptr(ctx->result);
+	key_check(key);
+	if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
+		key->last_used_at = ctx->now.tv_sec;
+		keyring->last_used_at = ctx->now.tv_sec;
+		while (sp > 0)
+			stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
+	}
+	kleave(" = true");
+	return true;
+}
+
+/**
+ * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @ctx: The keyring search context.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree.  In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH).
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit.  The
+ * match function may use any attributes of a key that it wishes to
+ * determine the match.  Normally the match function from the key type would be
+ * used.
+ *
+ * RCU can be used to prevent the keyring key lists from disappearing without
+ * the need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
+ */
+key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+			     struct keyring_search_context *ctx)
+{
+	struct key *keyring;
+	long err;
+
+	ctx->iterator = keyring_search_iterator;
+	ctx->possessed = is_key_possessed(keyring_ref);
+	ctx->result = ERR_PTR(-EAGAIN);
+
+	keyring = key_ref_to_ptr(keyring_ref);
+	key_check(keyring);
+
+	if (keyring->type != &key_type_keyring)
+		return ERR_PTR(-ENOTDIR);
+
+	if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
+		err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
+		if (err < 0)
+			return ERR_PTR(err);
+	}
+
+	rcu_read_lock();
+	ctx->now = current_kernel_time();
+	if (search_nested_keyrings(keyring, ctx))
+		__key_get(key_ref_to_ptr(ctx->result));
+	rcu_read_unlock();
+	return ctx->result;
+}
+
+/**
+ * keyring_search - Search the supplied keyring tree for a matching key
+ * @keyring: The root of the keyring tree to be searched.
+ * @type: The type of keyring we want to find.
+ * @description: The name of the keyring we want to find.
+ *
+ * As keyring_search_aux() above, but using the current task's credentials and
+ * type's default matching function and preferred search method.
+ */
+key_ref_t keyring_search(key_ref_t keyring,
+			 struct key_type *type,
+			 const char *description)
+{
+	struct keyring_search_context ctx = {
+		.index_key.type		= type,
+		.index_key.description	= description,
+		.cred			= current_cred(),
+		.match_data.cmp		= key_default_cmp,
+		.match_data.raw_data	= description,
+		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
+		.flags			= KEYRING_SEARCH_DO_STATE_CHECK,
+	};
+	key_ref_t key;
+	int ret;
+
+	if (type->match_preparse) {
+		ret = type->match_preparse(&ctx.match_data);
+		if (ret < 0)
+			return ERR_PTR(ret);
+	}
+
+	key = keyring_search_aux(keyring, &ctx);
+
+	if (type->match_free)
+		type->match_free(&ctx.match_data);
+	return key;
+}
+EXPORT_SYMBOL(keyring_search);
+
+/*
+ * Search the given keyring for a key that might be updated.
+ *
+ * The caller must guarantee that the keyring is a keyring and that the
+ * permission is granted to modify the keyring as no check is made here.  The
+ * caller must also hold a lock on the keyring semaphore.
+ *
+ * Returns a pointer to the found key with usage count incremented if
+ * successful and returns NULL if not found.  Revoked and invalidated keys are
+ * skipped over.
+ *
+ * If successful, the possession indicator is propagated from the keyring ref
+ * to the returned key reference.
+ */
+key_ref_t find_key_to_update(key_ref_t keyring_ref,
+			     const struct keyring_index_key *index_key)
+{
+	struct key *keyring, *key;
+	const void *object;
+
+	keyring = key_ref_to_ptr(keyring_ref);
+
+	kenter("{%d},{%s,%s}",
+	       keyring->serial, index_key->type->name, index_key->description);
+
+	object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
+				  index_key);
+
+	if (object)
+		goto found;
+
+	kleave(" = NULL");
+	return NULL;
+
+found:
+	key = keyring_ptr_to_key(object);
+	if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+			  (1 << KEY_FLAG_REVOKED))) {
+		kleave(" = NULL [x]");
+		return NULL;
+	}
+	__key_get(key);
+	kleave(" = {%d}", key->serial);
+	return make_key_ref(key, is_key_possessed(keyring_ref));
+}
+
+/*
+ * Find a keyring with the specified name.
+ *
+ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
+ * user in the current user namespace are considered.  If @uid_keyring is %true,
+ * the keyring additionally must have been allocated as a user or user session
+ * keyring; otherwise, it must grant Search permission directly to the caller.
+ *
+ * Returns a pointer to the keyring with the keyring's refcount having been
+ * incremented on success.  -ENOKEY is returned if a key could not be found.
+ */
+struct key *find_keyring_by_name(const char *name, bool uid_keyring)
+{
+	struct key *keyring;
+	int bucket;
+
+	if (!name)
+		return ERR_PTR(-EINVAL);
+
+	bucket = keyring_hash(name);
+
+	read_lock(&keyring_name_lock);
+
+	if (keyring_name_hash[bucket].next) {
+		/* search this hash bucket for a keyring with a matching name
+		 * that's readable and that hasn't been revoked */
+		list_for_each_entry(keyring,
+				    &keyring_name_hash[bucket],
+				    name_link
+				    ) {
+			if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
+				continue;
+
+			if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+				continue;
+
+			if (strcmp(keyring->description, name) != 0)
+				continue;
+
+			if (uid_keyring) {
+				if (!test_bit(KEY_FLAG_UID_KEYRING,
+					      &keyring->flags))
+					continue;
+			} else {
+				if (key_permission(make_key_ref(keyring, 0),
+						   KEY_NEED_SEARCH) < 0)
+					continue;
+			}
+
+			/* we've got a match but we might end up racing with
+			 * key_cleanup() if the keyring is currently 'dead'
+			 * (ie. it has a zero usage count) */
+			if (!atomic_inc_not_zero(&keyring->usage))
+				continue;
+			keyring->last_used_at = current_kernel_time().tv_sec;
+			goto out;
+		}
+	}
+
+	keyring = ERR_PTR(-ENOKEY);
+out:
+	read_unlock(&keyring_name_lock);
+	return keyring;
+}
+
+static int keyring_detect_cycle_iterator(const void *object,
+					 void *iterator_data)
+{
+	struct keyring_search_context *ctx = iterator_data;
+	const struct key *key = keyring_ptr_to_key(object);
+
+	kenter("{%d}", key->serial);
+
+	/* We might get a keyring with matching index-key that is nonetheless a
+	 * different keyring. */
+	if (key != ctx->match_data.raw_data)
+		return 0;
+
+	ctx->result = ERR_PTR(-EDEADLK);
+	return 1;
+}
+
+/*
+ * See if a cycle will be created by inserting acyclic tree B in acyclic
+ * tree A at the topmost level (ie: as a direct child of A).
+ *
+ * Since we are adding B to A at the top level, checking for cycles should just
+ * be a matter of seeing if node A is somewhere in tree B.
+ */
+static int keyring_detect_cycle(struct key *A, struct key *B)
+{
+	struct keyring_search_context ctx = {
+		.index_key		= A->index_key,
+		.match_data.raw_data	= A,
+		.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+		.iterator		= keyring_detect_cycle_iterator,
+		.flags			= (KEYRING_SEARCH_NO_STATE_CHECK |
+					   KEYRING_SEARCH_NO_UPDATE_TIME |
+					   KEYRING_SEARCH_NO_CHECK_PERM |
+					   KEYRING_SEARCH_DETECT_TOO_DEEP),
+	};
+
+	rcu_read_lock();
+	search_nested_keyrings(B, &ctx);
+	rcu_read_unlock();
+	return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
+}
+
+/*
+ * Preallocate memory so that a key can be linked into a keyring.
+ */
+int __key_link_begin(struct key *keyring,
+		     const struct keyring_index_key *index_key,
+		     struct assoc_array_edit **_edit)
+	__acquires(&keyring->sem)
+	__acquires(&keyring_serialise_link_sem)
+{
+	struct assoc_array_edit *edit;
+	int ret;
+
+	kenter("%d,%s,%s,",
+	       keyring->serial, index_key->type->name, index_key->description);
+
+	BUG_ON(index_key->desc_len == 0);
+
+	if (keyring->type != &key_type_keyring)
+		return -ENOTDIR;
+
+	down_write(&keyring->sem);
+
+	ret = -EKEYREVOKED;
+	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+		goto error_krsem;
+
+	/* serialise link/link calls to prevent parallel calls causing a cycle
+	 * when linking two keyrings in opposite orders */
+	if (index_key->type == &key_type_keyring)
+		down_write(&keyring_serialise_link_sem);
+
+	/* Create an edit script that will insert/replace the key in the
+	 * keyring tree.
+	 */
+	edit = assoc_array_insert(&keyring->keys,
+				  &keyring_assoc_array_ops,
+				  index_key,
+				  NULL);
+	if (IS_ERR(edit)) {
+		ret = PTR_ERR(edit);
+		goto error_sem;
+	}
+
+	/* If we're not replacing a link in-place then we're going to need some
+	 * extra quota.
+	 */
+	if (!edit->dead_leaf) {
+		ret = key_payload_reserve(keyring,
+					  keyring->datalen + KEYQUOTA_LINK_BYTES);
+		if (ret < 0)
+			goto error_cancel;
+	}
+
+	*_edit = edit;
+	kleave(" = 0");
+	return 0;
+
+error_cancel:
+	assoc_array_cancel_edit(edit);
+error_sem:
+	if (index_key->type == &key_type_keyring)
+		up_write(&keyring_serialise_link_sem);
+error_krsem:
+	up_write(&keyring->sem);
+	kleave(" = %d", ret);
+	return ret;
+}
+
+/*
+ * Check already instantiated keys aren't going to be a problem.
+ *
+ * The caller must have called __key_link_begin().  There is no need to call
+ * this for keys that were created since __key_link_begin() was called.
+ */
+int __key_link_check_live_key(struct key *keyring, struct key *key)
+{
+	if (key->type == &key_type_keyring)
+		/* check that we aren't going to create a cycle by linking one
+		 * keyring to another */
+		return keyring_detect_cycle(keyring, key);
+	return 0;
+}
+
+/*
+ * Link a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.  Discards any
+ * already extant link to matching key if there is one, so that each keyring
+ * holds at most one link to any given key of a particular type+description
+ * combination.
+ */
+void __key_link(struct key *key, struct assoc_array_edit **_edit)
+{
+	__key_get(key);
+	assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
+	assoc_array_apply_edit(*_edit);
+	*_edit = NULL;
+}
+
+/*
+ * Finish linking a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.
+ */
+void __key_link_end(struct key *keyring,
+		    const struct keyring_index_key *index_key,
+		    struct assoc_array_edit *edit)
+	__releases(&keyring->sem)
+	__releases(&keyring_serialise_link_sem)
+{
+	BUG_ON(index_key->type == NULL);
+	kenter("%d,%s,", keyring->serial, index_key->type->name);
+
+	if (index_key->type == &key_type_keyring)
+		up_write(&keyring_serialise_link_sem);
+
+	if (edit) {
+		if (!edit->dead_leaf) {
+			key_payload_reserve(keyring,
+				keyring->datalen - KEYQUOTA_LINK_BYTES);
+		}
+		assoc_array_cancel_edit(edit);
+	}
+	up_write(&keyring->sem);
+}
+
+/**
+ * key_link - Link a key to a keyring
+ * @keyring: The keyring to make the link in.
+ * @key: The key to link to.
+ *
+ * Make a link in a keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring.
+ *
+ * This function will write-lock the keyring's semaphore and will consume some
+ * of the user's key data quota to hold the link.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
+ * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
+ * full, -EDQUOT if there is insufficient key data quota remaining to add
+ * another link or -ENOMEM if there's insufficient memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
+ */
+int key_link(struct key *keyring, struct key *key)
+{
+	struct assoc_array_edit *edit;
+	int ret;
+
+	kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage));
+
+	key_check(keyring);
+	key_check(key);
+
+	if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) &&
+	    !test_bit(KEY_FLAG_TRUSTED, &key->flags))
+		return -EPERM;
+
+	ret = __key_link_begin(keyring, &key->index_key, &edit);
+	if (ret == 0) {
+		kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage));
+		ret = __key_link_check_live_key(keyring, key);
+		if (ret == 0)
+			__key_link(key, &edit);
+		__key_link_end(keyring, &key->index_key, edit);
+	}
+
+	kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage));
+	return ret;
+}
+EXPORT_SYMBOL(key_link);
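+
+/* Example (illustrative sketch): kernel code holding references to both
+ * objects, having already verified Write permission on the keyring and Link
+ * permission on the key, might do:
+ *
+ *	ret = key_link(keyring, key);
+ *	if (ret < 0)
+ *		pr_warn("could not link key: %d\n", ret);
+ */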
+
+/**
+ * key_unlink - Unlink the first link to a key from a keyring.
+ * @keyring: The keyring to remove the link from.
+ * @key: The key the link is to.
+ *
+ * Remove a link from a keyring to a key.
+ *
+ * This function will write-lock the keyring's semaphore.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
+ * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
+ * memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be removed (the keyring should have Write permission; no permissions are
+ * required on the key).
+ */
+int key_unlink(struct key *keyring, struct key *key)
+{
+	struct assoc_array_edit *edit;
+	int ret;
+
+	key_check(keyring);
+	key_check(key);
+
+	if (keyring->type != &key_type_keyring)
+		return -ENOTDIR;
+
+	down_write(&keyring->sem);
+
+	edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+				  &key->index_key);
+	if (IS_ERR(edit)) {
+		ret = PTR_ERR(edit);
+		goto error;
+	}
+	ret = -ENOENT;
+	if (edit == NULL)
+		goto error;
+
+	assoc_array_apply_edit(edit);
+	key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
+	ret = 0;
+
+error:
+	up_write(&keyring->sem);
+	return ret;
+}
+EXPORT_SYMBOL(key_unlink);
+
+/**
+ * keyring_clear - Clear a keyring
+ * @keyring: The keyring to clear.
+ *
+ * Clear the contents of the specified keyring.
+ *
+ * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
+ */
+int keyring_clear(struct key *keyring)
+{
+	struct assoc_array_edit *edit;
+	int ret;
+
+	if (keyring->type != &key_type_keyring)
+		return -ENOTDIR;
+
+	down_write(&keyring->sem);
+
+	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+	if (IS_ERR(edit)) {
+		ret = PTR_ERR(edit);
+	} else {
+		if (edit)
+			assoc_array_apply_edit(edit);
+		key_payload_reserve(keyring, 0);
+		ret = 0;
+	}
+
+	up_write(&keyring->sem);
+	return ret;
+}
+EXPORT_SYMBOL(keyring_clear);
+
+/*
+ * Dispose of the links from a revoked keyring.
+ *
+ * This is called with the key sem write-locked.
+ */
+static void keyring_revoke(struct key *keyring)
+{
+	struct assoc_array_edit *edit;
+
+	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+	if (!IS_ERR(edit)) {
+		if (edit)
+			assoc_array_apply_edit(edit);
+		key_payload_reserve(keyring, 0);
+	}
+}
+
+static bool keyring_gc_select_iterator(void *object, void *iterator_data)
+{
+	struct key *key = keyring_ptr_to_key(object);
+	time_t *limit = iterator_data;
+
+	if (key_is_dead(key, *limit))
+		return false;
+	key_get(key);
+	return true;
+}
+
+static int keyring_gc_check_iterator(const void *object, void *iterator_data)
+{
+	const struct key *key = keyring_ptr_to_key(object);
+	time_t *limit = iterator_data;
+
+	key_check(key);
+	return key_is_dead(key, *limit);
+}
+
+/*
+ * Garbage collect pointers from a keyring.
+ *
+ * Not called with any locks held.  The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
+ */
+void keyring_gc(struct key *keyring, time_t limit)
+{
+	int result;
+
+	kenter("%x{%s}", keyring->serial, keyring->description ?: "");
+
+	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+			      (1 << KEY_FLAG_REVOKED)))
+		goto dont_gc;
+
+	/* scan the keyring looking for dead keys */
+	rcu_read_lock();
+	result = assoc_array_iterate(&keyring->keys,
+				     keyring_gc_check_iterator, &limit);
+	rcu_read_unlock();
+	if (result == true)
+		goto do_gc;
+
+dont_gc:
+	kleave(" [no gc]");
+	return;
+
+do_gc:
+	down_write(&keyring->sem);
+	assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
+		       keyring_gc_select_iterator, &limit);
+	up_write(&keyring->sem);
+	kleave(" [gc]");
+}
diff --git a/security/keys/permission.c b/security/keys/permission.c
new file mode 100644
index 0000000..732cc0b
--- /dev/null
+++ b/security/keys/permission.c
@@ -0,0 +1,110 @@
+/* Key permission checking
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/security.h>
+#include "internal.h"
+
+/**
+ * key_task_permission - Check a key can be used
+ * @key_ref: The key to check.
+ * @cred: The credentials to use.
+ * @perm: The permissions to check for.
+ *
+ * Check to see whether permission is granted to use a key in the desired way,
+ * but permit the security modules to override.
+ *
+ * The caller must hold either a ref on cred or must hold the RCU readlock.
+ *
+ * Returns 0 if successful, -EACCES if access is denied based on the
+ * permissions bits or the LSM check.
+ */
+int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
+			unsigned perm)
+{
+	struct key *key;
+	key_perm_t kperm;
+	int ret;
+
+	key = key_ref_to_ptr(key_ref);
+
+	/* use the second 8-bits of permissions for keys the caller owns */
+	if (uid_eq(key->uid, cred->fsuid)) {
+		kperm = key->perm >> 16;
+		goto use_these_perms;
+	}
+
+	/* use the third 8-bits of permissions for keys the caller has a group
+	 * membership in common with */
+	if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) {
+		if (gid_eq(key->gid, cred->fsgid)) {
+			kperm = key->perm >> 8;
+			goto use_these_perms;
+		}
+
+		ret = groups_search(cred->group_info, key->gid);
+		if (ret) {
+			kperm = key->perm >> 8;
+			goto use_these_perms;
+		}
+	}
+
+	/* otherwise use the least-significant 8-bits */
+	kperm = key->perm;
+
+use_these_perms:
+
+	/* use the top 8-bits of permissions for keys the caller possesses
+	 * - possessor permissions are additive with other permissions
+	 */
+	if (is_key_possessed(key_ref))
+		kperm |= key->perm >> 24;
+
+	kperm = kperm & perm & KEY_NEED_ALL;
+
+	if (kperm != perm)
+		return -EACCES;
+
+	/* let LSM be the final arbiter */
+	return security_key_permission(key_ref, cred, perm);
+}
+EXPORT_SYMBOL(key_task_permission);
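+
+/* Worked example (illustrative): the mask
+ * ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ), as used
+ * by the persistent keyrings added later in this change, is 0x1f030000: the
+ * possessor gets everything bar Setattr (bits 24-29), the owner gets View
+ * and Read (bits 16-21), and group/other (bits 8-13 and 0-5) get nothing.
+ */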
+
+/**
+ * key_validate - Validate a key.
+ * @key: The key to be validated.
+ *
+ * Check that a key is valid, returning 0 if the key is okay, -ENOKEY if the
+ * key is invalidated, -EKEYREVOKED if the key's type has been removed or if
+ * the key has been revoked or -EKEYEXPIRED if the key has expired.
+ */
+int key_validate(const struct key *key)
+{
+	unsigned long flags = key->flags;
+
+	if (flags & (1 << KEY_FLAG_INVALIDATED))
+		return -ENOKEY;
+
+	/* check it's still accessible */
+	if (flags & ((1 << KEY_FLAG_REVOKED) |
+		     (1 << KEY_FLAG_DEAD)))
+		return -EKEYREVOKED;
+
+	/* check it hasn't expired */
+	if (key->expiry) {
+		struct timespec now = current_kernel_time();
+		if (now.tv_sec >= key->expiry)
+			return -EKEYEXPIRED;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(key_validate);
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
new file mode 100644
index 0000000..c9fae5e
--- /dev/null
+++ b/security/keys/persistent.c
@@ -0,0 +1,167 @@
+/* General persistent per-UID keyrings register
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/user_namespace.h>
+#include "internal.h"
+
+unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */
+
+/*
+ * Create the persistent keyring register for the current user namespace.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static int key_create_persistent_register(struct user_namespace *ns)
+{
+	struct key *reg = keyring_alloc(".persistent_register",
+					KUIDT_INIT(0), KGIDT_INIT(0),
+					current_cred(),
+					((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+					 KEY_USR_VIEW | KEY_USR_READ),
+					KEY_ALLOC_NOT_IN_QUOTA, NULL);
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	ns->persistent_keyring_register = reg;
+	return 0;
+}
+
+/*
+ * Create the persistent keyring for the specified user.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid,
+				       struct keyring_index_key *index_key)
+{
+	struct key *persistent;
+	key_ref_t reg_ref, persistent_ref;
+
+	if (!ns->persistent_keyring_register) {
+		long err = key_create_persistent_register(ns);
+		if (err < 0)
+			return ERR_PTR(err);
+	} else {
+		reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+		persistent_ref = find_key_to_update(reg_ref, index_key);
+		if (persistent_ref)
+			return persistent_ref;
+	}
+
+	persistent = keyring_alloc(index_key->description,
+				   uid, INVALID_GID, current_cred(),
+				   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+				    KEY_USR_VIEW | KEY_USR_READ),
+				   KEY_ALLOC_NOT_IN_QUOTA,
+				   ns->persistent_keyring_register);
+	if (IS_ERR(persistent))
+		return ERR_CAST(persistent);
+
+	return make_key_ref(persistent, true);
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
+			       key_ref_t dest_ref)
+{
+	struct keyring_index_key index_key;
+	struct key *persistent;
+	key_ref_t reg_ref, persistent_ref;
+	char buf[32];
+	long ret;
+
+	/* Look in the register if it exists */
+	index_key.type = &key_type_keyring;
+	index_key.description = buf;
+	index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+
+	if (ns->persistent_keyring_register) {
+		reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+		down_read(&ns->persistent_keyring_register_sem);
+		persistent_ref = find_key_to_update(reg_ref, &index_key);
+		up_read(&ns->persistent_keyring_register_sem);
+
+		if (persistent_ref)
+			goto found;
+	}
+
+	/* It wasn't in the register, so we'll need to create it.  We might
+	 * also need to create the register.
+	 */
+	down_write(&ns->persistent_keyring_register_sem);
+	persistent_ref = key_create_persistent(ns, uid, &index_key);
+	up_write(&ns->persistent_keyring_register_sem);
+	if (!IS_ERR(persistent_ref))
+		goto found;
+
+	return PTR_ERR(persistent_ref);
+
+found:
+	ret = key_task_permission(persistent_ref, current_cred(), KEY_NEED_LINK);
+	if (ret == 0) {
+		persistent = key_ref_to_ptr(persistent_ref);
+		ret = key_link(key_ref_to_ptr(dest_ref), persistent);
+		if (ret == 0) {
+			key_set_timeout(persistent, persistent_keyring_expiry);
+			ret = persistent->serial;
+		}
+	}
+
+	key_ref_put(persistent_ref);
+	return ret;
+}
+
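+/* Illustrative only: userspace reaches the function below via the keyctl()
+ * syscall, roughly
+ *
+ *	serial = keyctl(KEYCTL_GET_PERSISTENT, uid, dest_keyring);
+ *
+ * where a uid of (uid_t)-1 denotes the caller's own persistent keyring.
+ */
+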
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
+{
+	struct user_namespace *ns = current_user_ns();
+	key_ref_t dest_ref;
+	kuid_t uid;
+	long ret;
+
+	/* -1 indicates the current user */
+	if (_uid == (uid_t)-1) {
+		uid = current_uid();
+	} else {
+		uid = make_kuid(ns, _uid);
+		if (!uid_valid(uid))
+			return -EINVAL;
+
+		/* You can only see your own persistent cache if you're not
+		 * sufficiently privileged.
+		 */
+		if (!uid_eq(uid, current_uid()) &&
+		    !uid_eq(uid, current_euid()) &&
+		    !ns_capable(ns, CAP_SETUID))
+			return -EPERM;
+	}
+
+	/* There must be a destination keyring */
+	dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+	if (IS_ERR(dest_ref))
+		return PTR_ERR(dest_ref);
+	if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
+		ret = -ENOTDIR;
+		goto out_put_dest;
+	}
+
+	ret = key_get_persistent(ns, uid, dest_ref);
+
+out_put_dest:
+	key_ref_put(dest_ref);
+	return ret;
+}
diff --git a/security/keys/proc.c b/security/keys/proc.c
new file mode 100644
index 0000000..0361286
--- /dev/null
+++ b/security/keys/proc.c
@@ -0,0 +1,355 @@
+/* procfs files for key database enumeration
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/errno.h>
+#include "internal.h"
+
+static int proc_keys_open(struct inode *inode, struct file *file);
+static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
+static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
+static void proc_keys_stop(struct seq_file *p, void *v);
+static int proc_keys_show(struct seq_file *m, void *v);
+
+static const struct seq_operations proc_keys_ops = {
+	.start	= proc_keys_start,
+	.next	= proc_keys_next,
+	.stop	= proc_keys_stop,
+	.show	= proc_keys_show,
+};
+
+static const struct file_operations proc_keys_fops = {
+	.open		= proc_keys_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int proc_key_users_open(struct inode *inode, struct file *file);
+static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
+static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos);
+static void proc_key_users_stop(struct seq_file *p, void *v);
+static int proc_key_users_show(struct seq_file *m, void *v);
+
+static const struct seq_operations proc_key_users_ops = {
+	.start	= proc_key_users_start,
+	.next	= proc_key_users_next,
+	.stop	= proc_key_users_stop,
+	.show	= proc_key_users_show,
+};
+
+static const struct file_operations proc_key_users_fops = {
+	.open		= proc_key_users_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * Declare the /proc files.
+ */
+static int __init key_proc_init(void)
+{
+	struct proc_dir_entry *p;
+
+	p = proc_create("keys", 0, NULL, &proc_keys_fops);
+	if (!p)
+		panic("Cannot create /proc/keys\n");
+
+	p = proc_create("key-users", 0, NULL, &proc_key_users_fops);
+	if (!p)
+		panic("Cannot create /proc/key-users\n");
+
+	return 0;
+}
+
+__initcall(key_proc_init);
+
+/*
+ * Implement "/proc/keys" to provide a list of the keys on the system that
+ * grant View permission to the caller.
+ */
+static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
+{
+	struct user_namespace *user_ns = seq_user_ns(p);
+
+	n = rb_next(n);
+	while (n) {
+		struct key *key = rb_entry(n, struct key, serial_node);
+		if (kuid_has_mapping(user_ns, key->user->uid))
+			break;
+		n = rb_next(n);
+	}
+	return n;
+}
+
+static int proc_keys_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &proc_keys_ops);
+}
+
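+/*
+ * Find the key with the smallest serial number not less than the given ID,
+ * skipping keys whose owning user has no mapping in the reader's user
+ * namespace.
+ */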
+static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
+{
+	struct user_namespace *user_ns = seq_user_ns(p);
+	struct rb_node *n = key_serial_tree.rb_node;
+	struct key *minkey = NULL;
+
+	while (n) {
+		struct key *key = rb_entry(n, struct key, serial_node);
+		if (id < key->serial) {
+			if (!minkey || minkey->serial > key->serial)
+				minkey = key;
+			n = n->rb_left;
+		} else if (id > key->serial) {
+			n = n->rb_right;
+		} else {
+			minkey = key;
+			break;
+		}
+		key = NULL;
+	}
+
+	if (!minkey)
+		return NULL;
+
+	for (;;) {
+		if (kuid_has_mapping(user_ns, minkey->user->uid))
+			return minkey;
+		n = rb_next(&minkey->serial_node);
+		if (!n)
+			return NULL;
+		minkey = rb_entry(n, struct key, serial_node);
+	}
+}
+
+static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
+	__acquires(key_serial_lock)
+{
+	key_serial_t pos = *_pos;
+	struct key *key;
+
+	spin_lock(&key_serial_lock);
+
+	if (*_pos > INT_MAX)
+		return NULL;
+	key = find_ge_key(p, pos);
+	if (!key)
+		return NULL;
+	*_pos = key->serial;
+	return &key->serial_node;
+}
+
+static inline key_serial_t key_node_serial(struct rb_node *n)
+{
+	struct key *key = rb_entry(n, struct key, serial_node);
+	return key->serial;
+}
+
+static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+	struct rb_node *n;
+
+	n = key_serial_next(p, v);
+	if (n)
+		*_pos = key_node_serial(n);
+	return n;
+}
+
+static void proc_keys_stop(struct seq_file *p, void *v)
+	__releases(key_serial_lock)
+{
+	spin_unlock(&key_serial_lock);
+}
+
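+/* A line of the output produced below looks roughly like this (illustrative
+ * values):
+ *
+ *	2e7eab0c I--Q---     2 perm 3f010000  1000  1000 keyring   _uid.1000: 2
+ */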
+static int proc_keys_show(struct seq_file *m, void *v)
+{
+	struct rb_node *_p = v;
+	struct key *key = rb_entry(_p, struct key, serial_node);
+	struct timespec now;
+	unsigned long timo;
+	key_ref_t key_ref, skey_ref;
+	char xbuf[16];
+	short state;
+	int rc;
+
+	struct keyring_search_context ctx = {
+		.index_key.type		= key->type,
+		.index_key.description	= key->description,
+		.cred			= current_cred(),
+		.match_data.cmp		= lookup_user_key_possessed,
+		.match_data.raw_data	= key,
+		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
+		.flags			= KEYRING_SEARCH_NO_STATE_CHECK,
+	};
+
+	key_ref = make_key_ref(key, 0);
+
+	/* determine if the key is possessed by this process (a test we can
+	 * skip if the key does not indicate the possessor can view it)
+	 */
+	if (key->perm & KEY_POS_VIEW) {
+		skey_ref = search_my_process_keyrings(&ctx);
+		if (!IS_ERR(skey_ref)) {
+			key_ref_put(skey_ref);
+			key_ref = make_key_ref(key, 1);
+		}
+	}
+
+	/* check whether the current task is allowed to view the key (assuming
+	 * non-possession)
+	 * - the caller holds a spinlock, and thus the RCU read lock, making our
+	 *   access to __current_cred() safe
+	 */
+	rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
+	if (rc < 0)
+		return 0;
+
+	now = current_kernel_time();
+
+	rcu_read_lock();
+
+	/* come up with a suitable timeout value */
+	if (key->expiry == 0) {
+		memcpy(xbuf, "perm", 5);
+	} else if (now.tv_sec >= key->expiry) {
+		memcpy(xbuf, "expd", 5);
+	} else {
+		timo = key->expiry - now.tv_sec;
+
+		if (timo < 60)
+			sprintf(xbuf, "%lus", timo);
+		else if (timo < 60*60)
+			sprintf(xbuf, "%lum", timo / 60);
+		else if (timo < 60*60*24)
+			sprintf(xbuf, "%luh", timo / (60*60));
+		else if (timo < 60*60*24*7)
+			sprintf(xbuf, "%lud", timo / (60*60*24));
+		else
+			sprintf(xbuf, "%luw", timo / (60*60*24*7));
+	}
+
+	state = key_read_state(key);
+
+#define showflag(KEY, LETTER, FLAG) \
+	(test_bit(FLAG,	&(KEY)->flags) ? LETTER : '-')
+
+	seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
+		   key->serial,
+		   state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
+		   showflag(key, 'R', KEY_FLAG_REVOKED),
+		   showflag(key, 'D', KEY_FLAG_DEAD),
+		   showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
+		   showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
+		   state < 0 ? 'N' : '-',
+		   showflag(key, 'i', KEY_FLAG_INVALIDATED),
+		   atomic_read(&key->usage),
+		   xbuf,
+		   key->perm,
+		   from_kuid_munged(seq_user_ns(m), key->uid),
+		   from_kgid_munged(seq_user_ns(m), key->gid),
+		   key->type->name);
+
+#undef showflag
+
+	if (key->type->describe)
+		key->type->describe(key, m);
+	seq_putc(m, '\n');
+
+	rcu_read_unlock();
+	return 0;
+}
+
+static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
+{
+	while (n) {
+		struct key_user *user = rb_entry(n, struct key_user, node);
+		if (kuid_has_mapping(user_ns, user->uid))
+			break;
+		n = rb_next(n);
+	}
+	return n;
+}
+
+static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
+{
+	return __key_user_next(user_ns, rb_next(n));
+}
+
+static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
+{
+	struct rb_node *n = rb_first(r);
+	return __key_user_next(user_ns, n);
+}
+
+/*
+ * Implement "/proc/key-users" to provide a list of the key users and their
+ * quotas.
+ */
+static int proc_key_users_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &proc_key_users_ops);
+}
+
+static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
+	__acquires(key_user_lock)
+{
+	struct rb_node *_p;
+	loff_t pos = *_pos;
+
+	spin_lock(&key_user_lock);
+
+	_p = key_user_first(seq_user_ns(p), &key_user_tree);
+	while (pos > 0 && _p) {
+		pos--;
+		_p = key_user_next(seq_user_ns(p), _p);
+	}
+
+	return _p;
+}
+
+static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+	(*_pos)++;
+	return key_user_next(seq_user_ns(p), (struct rb_node *)v);
+}
+
+static void proc_key_users_stop(struct seq_file *p, void *v)
+	__releases(key_user_lock)
+{
+	spin_unlock(&key_user_lock);
+}
+
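+/* Each line of the output produced below looks roughly like this
+ * (illustrative values):
+ *
+ *	 1000:    10 10/10 6/200 66/20000
+ *
+ * i.e. uid, structure usage count, total/instantiated key counts, and the
+ * key-count and byte quotas as used/maximum.
+ */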
+static int proc_key_users_show(struct seq_file *m, void *v)
+{
+	struct rb_node *_p = v;
+	struct key_user *user = rb_entry(_p, struct key_user, node);
+	unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
+		key_quota_root_maxkeys : key_quota_maxkeys;
+	unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
+		key_quota_root_maxbytes : key_quota_maxbytes;
+
+	seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
+		   from_kuid_munged(seq_user_ns(m), user->uid),
+		   atomic_read(&user->usage),
+		   atomic_read(&user->nkeys),
+		   atomic_read(&user->nikeys),
+		   user->qnkeys,
+		   maxkeys,
+		   user->qnbytes,
+		   maxbytes);
+
+	return 0;
+}
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
new file mode 100644
index 0000000..ac1d5b2
--- /dev/null
+++ b/security/keys/process_keys.c
@@ -0,0 +1,886 @@
+/* Manage a process's keyrings
+ *
+ * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/keyctl.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/security.h>
+#include <linux/user_namespace.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+/* Session keyring create vs join semaphore */
+static DEFINE_MUTEX(key_session_mutex);
+
+/* User keyring creation semaphore */
+static DEFINE_MUTEX(key_user_keyring_mutex);
+
+/* The root user's tracking struct */
+struct key_user root_key_user = {
+	.usage		= ATOMIC_INIT(3),
+	.cons_lock	= __MUTEX_INITIALIZER(root_key_user.cons_lock),
+	.lock		= __SPIN_LOCK_UNLOCKED(root_key_user.lock),
+	.nkeys		= ATOMIC_INIT(2),
+	.nikeys		= ATOMIC_INIT(2),
+	.uid		= GLOBAL_ROOT_UID,
+};
+
+/*
+ * Install the user and user session keyrings for the current process's UID.
+ */
+int install_user_keyrings(void)
+{
+	struct user_struct *user;
+	const struct cred *cred;
+	struct key *uid_keyring, *session_keyring;
+	key_perm_t user_keyring_perm;
+	char buf[20];
+	int ret;
+	uid_t uid;
+
+	user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
+	cred = current_cred();
+	user = cred->user;
+	uid = from_kuid(cred->user_ns, user->uid);
+
+	kenter("%p{%u}", user, uid);
+
+	if (user->uid_keyring && user->session_keyring) {
+		kleave(" = 0 [exist]");
+		return 0;
+	}
+
+	mutex_lock(&key_user_keyring_mutex);
+	ret = 0;
+
+	if (!user->uid_keyring) {
+		/* get the UID-specific keyring
+		 * - there may be one in existence already as it may have been
+		 *   pinned by a session, but the user_struct pointing to it
+		 *   may have been destroyed by setuid */
+		sprintf(buf, "_uid.%u", uid);
+
+		uid_keyring = find_keyring_by_name(buf, true);
+		if (IS_ERR(uid_keyring)) {
+			uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
+						    cred, user_keyring_perm,
+						    KEY_ALLOC_UID_KEYRING |
+							KEY_ALLOC_IN_QUOTA,
+						    NULL);
+			if (IS_ERR(uid_keyring)) {
+				ret = PTR_ERR(uid_keyring);
+				goto error;
+			}
+		}
+
+		/* get a default session keyring (which might also exist
+		 * already) */
+		sprintf(buf, "_uid_ses.%u", uid);
+
+		session_keyring = find_keyring_by_name(buf, true);
+		if (IS_ERR(session_keyring)) {
+			session_keyring =
+				keyring_alloc(buf, user->uid, INVALID_GID,
+					      cred, user_keyring_perm,
+					      KEY_ALLOC_UID_KEYRING |
+						  KEY_ALLOC_IN_QUOTA,
+					      NULL);
+			if (IS_ERR(session_keyring)) {
+				ret = PTR_ERR(session_keyring);
+				goto error_release;
+			}
+
+			/* we install a link from the user session keyring to
+			 * the user keyring */
+			ret = key_link(session_keyring, uid_keyring);
+			if (ret < 0)
+				goto error_release_both;
+		}
+
+		/* install the keyrings */
+		user->uid_keyring = uid_keyring;
+		user->session_keyring = session_keyring;
+	}
+
+	mutex_unlock(&key_user_keyring_mutex);
+	kleave(" = 0");
+	return 0;
+
+error_release_both:
+	key_put(session_keyring);
+error_release:
+	key_put(uid_keyring);
+error:
+	mutex_unlock(&key_user_keyring_mutex);
+	kleave(" = %d", ret);
+	return ret;
+}
+
+/*
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
+ */
+int install_thread_keyring_to_cred(struct cred *new)
+{
+	struct key *keyring;
+
+	if (new->thread_keyring)
+		return 0;
+
+	keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+				KEY_POS_ALL | KEY_USR_VIEW,
+				KEY_ALLOC_QUOTA_OVERRUN, NULL);
+	if (IS_ERR(keyring))
+		return PTR_ERR(keyring);
+
+	new->thread_keyring = keyring;
+	return 0;
+}
+
+/*
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
+ */
+static int install_thread_keyring(void)
+{
+	struct cred *new;
+	int ret;
+
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	ret = install_thread_keyring_to_cred(new);
+	if (ret < 0) {
+		abort_creds(new);
+		return ret;
+	}
+
+	return commit_creds(new);
+}
+
+/*
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
+ *
+ * Return: 0 if a process keyring is now present; -errno on failure.
+ */
+int install_process_keyring_to_cred(struct cred *new)
+{
+	struct key *keyring;
+
+	if (new->process_keyring)
+		return 0;
+
+	keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+				KEY_POS_ALL | KEY_USR_VIEW,
+				KEY_ALLOC_QUOTA_OVERRUN, NULL);
+	if (IS_ERR(keyring))
+		return PTR_ERR(keyring);
+
+	new->process_keyring = keyring;
+	return 0;
+}
+
+/*
+ * Install a process keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a process keyring is now present; -errno on failure.
+ */
+static int install_process_keyring(void)
+{
+	struct cred *new;
+	int ret;
+
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	ret = install_process_keyring_to_cred(new);
+	if (ret < 0) {
+		abort_creds(new);
+		return ret;
+	}
+
+	return commit_creds(new);
+}
+
+/*
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any.  If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
+{
+	unsigned long flags;
+	struct key *old;
+
+	might_sleep();
+
+	/* create an empty session keyring */
+	if (!keyring) {
+		flags = KEY_ALLOC_QUOTA_OVERRUN;
+		if (cred->session_keyring)
+			flags = KEY_ALLOC_IN_QUOTA;
+
+		keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
+					KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+					flags, NULL);
+		if (IS_ERR(keyring))
+			return PTR_ERR(keyring);
+	} else {
+		__key_get(keyring);
+	}
+
+	/* install the keyring */
+	old = cred->session_keyring;
+	rcu_assign_pointer(cred->session_keyring, keyring);
+
+	if (old)
+		key_put(old);
+
+	return 0;
+}
+
+/*
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any.  If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+static int install_session_keyring(struct key *keyring)
+{
+	struct cred *new;
+	int ret;
+
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	ret = install_session_keyring_to_cred(new, keyring);
+	if (ret < 0) {
+		abort_creds(new);
+		return ret;
+	}
+
+	return commit_creds(new);
+}
+
+/*
+ * Handle the fsuid changing.
+ */
+void key_fsuid_changed(struct task_struct *tsk)
+{
+	/* update the ownership of the thread keyring */
+	BUG_ON(!tsk->cred);
+	if (tsk->cred->thread_keyring) {
+		down_write(&tsk->cred->thread_keyring->sem);
+		tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
+		up_write(&tsk->cred->thread_keyring->sem);
+	}
+}
+
+/*
+ * Handle the fsgid changing.
+ */
+void key_fsgid_changed(struct task_struct *tsk)
+{
+	/* update the ownership of the thread keyring */
+	BUG_ON(!tsk->cred);
+	if (tsk->cred->thread_keyring) {
+		down_write(&tsk->cred->thread_keyring->sem);
+		tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
+		up_write(&tsk->cred->thread_keyring->sem);
+	}
+}
+
+/*
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key.
+ *
+ * The search criteria are the type and the match function.  The description is
+ * given to the match function as a parameter, but doesn't otherwise influence
+ * the search.  Typically the match function will compare the description
+ * parameter to the key's description.
+ *
+ * This can only search keyrings that grant Search permission to the supplied
+ * credentials.  Keyrings linked to searched keyrings will also be searched if
+ * they grant Search permission too.  Keys can only be found if they grant
+ * Search permission to the credentials.
+ *
+ * Returns a pointer to the key with the key usage count incremented if
+ * successful, -EAGAIN if we didn't find any matching key, or -ENOKEY if we
+ * only matched negative keys.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
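+ *
+ * The keyrings are tried in order: the thread keyring, then the process
+ * keyring, then the session keyring (or, if the credentials have no session
+ * keyring of their own, the user-session keyring).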
+ */
+key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
+{
+	key_ref_t key_ref, ret, err;
+
+	/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
+	 * searchable, but we failed to find a key or we found a negative key;
+	 * otherwise we want to return a sample error (probably -EACCES) if
+	 * none of the keyrings were searchable
+	 *
+	 * in terms of priority: success > -ENOKEY > -EAGAIN > other error
+	 */
+	key_ref = NULL;
+	ret = NULL;
+	err = ERR_PTR(-EAGAIN);
+
+	/* search the thread keyring first */
+	if (ctx->cred->thread_keyring) {
+		key_ref = keyring_search_aux(
+			make_key_ref(ctx->cred->thread_keyring, 1), ctx);
+		if (!IS_ERR(key_ref))
+			goto found;
+
+		switch (PTR_ERR(key_ref)) {
+		case -EAGAIN: /* no key */
+		case -ENOKEY: /* negative key */
+			ret = key_ref;
+			break;
+		default:
+			err = key_ref;
+			break;
+		}
+	}
+
+	/* search the process keyring second */
+	if (ctx->cred->process_keyring) {
+		key_ref = keyring_search_aux(
+			make_key_ref(ctx->cred->process_keyring, 1), ctx);
+		if (!IS_ERR(key_ref))
+			goto found;
+
+		switch (PTR_ERR(key_ref)) {
+		case -EAGAIN: /* no key */
+			if (ret)
+				break;
+		case -ENOKEY: /* negative key */
+			ret = key_ref;
+			break;
+		default:
+			err = key_ref;
+			break;
+		}
+	}
+
+	/* search the session keyring */
+	if (ctx->cred->session_keyring) {
+		rcu_read_lock();
+		key_ref = keyring_search_aux(
+			make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
+			ctx);
+		rcu_read_unlock();
+
+		if (!IS_ERR(key_ref))
+			goto found;
+
+		switch (PTR_ERR(key_ref)) {
+		case -EAGAIN: /* no key */
+			if (ret)
+				break;
+		case -ENOKEY: /* negative key */
+			ret = key_ref;
+			break;
+		default:
+			err = key_ref;
+			break;
+		}
+	}
+	/* or search the user-session keyring */
+	else if (ctx->cred->user->session_keyring) {
+		key_ref = keyring_search_aux(
+			make_key_ref(ctx->cred->user->session_keyring, 1),
+			ctx);
+		if (!IS_ERR(key_ref))
+			goto found;
+
+		switch (PTR_ERR(key_ref)) {
+		case -EAGAIN: /* no key */
+			if (ret)
+				break;
+		case -ENOKEY: /* negative key */
+			ret = key_ref;
+			break;
+		default:
+			err = key_ref;
+			break;
+		}
+	}
+
+	/* no key - decide on the error we're going to go for */
+	key_ref = ret ? ret : err;
+
+found:
+	return key_ref;
+}
+
+/*
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key in the manner of search_my_process_keyrings(), but also search
+ * the keys attached to the assumed authorisation key using its credentials if
+ * one is available.
+ *
+ * Returns the same as search_my_process_keyrings().
+ */
+key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
+{
+	struct request_key_auth *rka;
+	key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
+
+	might_sleep();
+
+	key_ref = search_my_process_keyrings(ctx);
+	if (!IS_ERR(key_ref))
+		goto found;
+	err = key_ref;
+
+	/* if this process has an instantiation authorisation key, then we also
+	 * search the keyrings of the process mentioned there
+	 * - we don't permit access to request_key auth keys via this method
+	 */
+	if (ctx->cred->request_key_auth &&
+	    ctx->cred == current_cred() &&
+	    ctx->index_key.type != &key_type_request_key_auth
+	    ) {
+		const struct cred *cred = ctx->cred;
+
+		/* defend against the auth key being revoked */
+		down_read(&cred->request_key_auth->sem);
+
+		if (key_validate(ctx->cred->request_key_auth) == 0) {
+			rka = ctx->cred->request_key_auth->payload.data[0];
+
+			ctx->cred = rka->cred;
+			key_ref = search_process_keyrings(ctx);
+			ctx->cred = cred;
+
+			up_read(&cred->request_key_auth->sem);
+
+			if (!IS_ERR(key_ref))
+				goto found;
+
+			ret = key_ref;
+		} else {
+			up_read(&cred->request_key_auth->sem);
+		}
+	}
+
+	/* no key - decide on the error we're going to go for */
+	if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
+		key_ref = ERR_PTR(-ENOKEY);
+	else if (err == ERR_PTR(-EACCES))
+		key_ref = ret;
+	else
+		key_ref = err;
+
+found:
+	return key_ref;
+}
+
+/*
+ * See if the key we're looking at is the target key.
+ */
+bool lookup_user_key_possessed(const struct key *key,
+			       const struct key_match_data *match_data)
+{
+	return key == match_data->raw_data;
+}
+
+/*
+ * Look up a key ID given us by userspace with a given permissions mask to get
+ * the key it refers to.
+ *
+ * Flags can be passed to request that special keyrings be created if referred
+ * to directly, to permit partially constructed keys to be found and to skip
+ * validity and permission checks on the found key.
+ *
+ * Returns a pointer to the key with an incremented usage count if successful;
+ * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
+ * to a key or the best found key was a negative key; -EKEYREVOKED or
+ * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
+ * found key doesn't grant the requested permission or the LSM denied access
+ * to it; or -ENOMEM if a special keyring couldn't be created.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
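+ *
+ * Negative IDs select special keyrings, e.g. KEY_SPEC_THREAD_KEYRING (-1) or
+ * KEY_SPEC_SESSION_KEYRING (-3); positive IDs name a specific key by serial
+ * number.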
+ */
+key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
+			  key_perm_t perm)
+{
+	struct keyring_search_context ctx = {
+		.match_data.cmp		= lookup_user_key_possessed,
+		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
+		.flags			= KEYRING_SEARCH_NO_STATE_CHECK,
+	};
+	struct request_key_auth *rka;
+	struct key *key;
+	key_ref_t key_ref, skey_ref;
+	int ret;
+
+try_again:
+	ctx.cred = get_current_cred();
+	key_ref = ERR_PTR(-ENOKEY);
+
+	switch (id) {
+	case KEY_SPEC_THREAD_KEYRING:
+		if (!ctx.cred->thread_keyring) {
+			if (!(lflags & KEY_LOOKUP_CREATE))
+				goto error;
+
+			ret = install_thread_keyring();
+			if (ret < 0) {
+				key_ref = ERR_PTR(ret);
+				goto error;
+			}
+			goto reget_creds;
+		}
+
+		key = ctx.cred->thread_keyring;
+		__key_get(key);
+		key_ref = make_key_ref(key, 1);
+		break;
+
+	case KEY_SPEC_PROCESS_KEYRING:
+		if (!ctx.cred->process_keyring) {
+			if (!(lflags & KEY_LOOKUP_CREATE))
+				goto error;
+
+			ret = install_process_keyring();
+			if (ret < 0) {
+				key_ref = ERR_PTR(ret);
+				goto error;
+			}
+			goto reget_creds;
+		}
+
+		key = ctx.cred->process_keyring;
+		__key_get(key);
+		key_ref = make_key_ref(key, 1);
+		break;
+
+	case KEY_SPEC_SESSION_KEYRING:
+		if (!ctx.cred->session_keyring) {
+			/* always install a session keyring upon access if one
+			 * doesn't exist yet */
+			ret = install_user_keyrings();
+			if (ret < 0)
+				goto error;
+			if (lflags & KEY_LOOKUP_CREATE)
+				ret = join_session_keyring(NULL);
+			else
+				ret = install_session_keyring(
+					ctx.cred->user->session_keyring);
+
+			if (ret < 0)
+				goto error;
+			goto reget_creds;
+		} else if (ctx.cred->session_keyring ==
+			   ctx.cred->user->session_keyring &&
+			   lflags & KEY_LOOKUP_CREATE) {
+			ret = join_session_keyring(NULL);
+			if (ret < 0)
+				goto error;
+			goto reget_creds;
+		}
+
+		rcu_read_lock();
+		key = rcu_dereference(ctx.cred->session_keyring);
+		__key_get(key);
+		rcu_read_unlock();
+		key_ref = make_key_ref(key, 1);
+		break;
+
+	case KEY_SPEC_USER_KEYRING:
+		if (!ctx.cred->user->uid_keyring) {
+			ret = install_user_keyrings();
+			if (ret < 0)
+				goto error;
+		}
+
+		key = ctx.cred->user->uid_keyring;
+		__key_get(key);
+		key_ref = make_key_ref(key, 1);
+		break;
+
+	case KEY_SPEC_USER_SESSION_KEYRING:
+		if (!ctx.cred->user->session_keyring) {
+			ret = install_user_keyrings();
+			if (ret < 0)
+				goto error;
+		}
+
+		key = ctx.cred->user->session_keyring;
+		__key_get(key);
+		key_ref = make_key_ref(key, 1);
+		break;
+
+	case KEY_SPEC_GROUP_KEYRING:
+		/* group keyrings are not yet supported */
+		key_ref = ERR_PTR(-EINVAL);
+		goto error;
+
+	case KEY_SPEC_REQKEY_AUTH_KEY:
+		key = ctx.cred->request_key_auth;
+		if (!key)
+			goto error;
+
+		__key_get(key);
+		key_ref = make_key_ref(key, 1);
+		break;
+
+	case KEY_SPEC_REQUESTOR_KEYRING:
+		if (!ctx.cred->request_key_auth)
+			goto error;
+
+		down_read(&ctx.cred->request_key_auth->sem);
+		if (test_bit(KEY_FLAG_REVOKED,
+			     &ctx.cred->request_key_auth->flags)) {
+			key_ref = ERR_PTR(-EKEYREVOKED);
+			key = NULL;
+		} else {
+			rka = ctx.cred->request_key_auth->payload.data[0];
+			key = rka->dest_keyring;
+			__key_get(key);
+		}
+		up_read(&ctx.cred->request_key_auth->sem);
+		if (!key)
+			goto error;
+		key_ref = make_key_ref(key, 1);
+		break;
+
+	default:
+		key_ref = ERR_PTR(-EINVAL);
+		if (id < 1)
+			goto error;
+
+		key = key_lookup(id);
+		if (IS_ERR(key)) {
+			key_ref = ERR_CAST(key);
+			goto error;
+		}
+
+		key_ref = make_key_ref(key, 0);
+
+		/* check to see if we possess the key */
+		ctx.index_key.type		= key->type;
+		ctx.index_key.description	= key->description;
+		ctx.index_key.desc_len		= strlen(key->description);
+		ctx.match_data.raw_data		= key;
+		kdebug("check possessed");
+		skey_ref = search_process_keyrings(&ctx);
+		kdebug("possessed=%p", skey_ref);
+
+		if (!IS_ERR(skey_ref)) {
+			key_put(key);
+			key_ref = skey_ref;
+		}
+
+		break;
+	}
+
+	/* unlink does not use the nominated key in any way, so we can skip
+	 * all the permission checks as it is only concerned with the keyring */
+	if (lflags & KEY_LOOKUP_FOR_UNLINK) {
+		ret = 0;
+		goto error;
+	}
+
+	if (!(lflags & KEY_LOOKUP_PARTIAL)) {
+		ret = wait_for_key_construction(key, true);
+		switch (ret) {
+		case -ERESTARTSYS:
+			goto invalid_key;
+		default:
+			if (perm)
+				goto invalid_key;
+		case 0:
+			break;
+		}
+	} else if (perm) {
+		ret = key_validate(key);
+		if (ret < 0)
+			goto invalid_key;
+	}
+
+	ret = -EIO;
+	if (!(lflags & KEY_LOOKUP_PARTIAL) &&
+	    key_read_state(key) == KEY_IS_UNINSTANTIATED)
+		goto invalid_key;
+
+	/* check the permissions */
+	ret = key_task_permission(key_ref, ctx.cred, perm);
+	if (ret < 0)
+		goto invalid_key;
+
+	key->last_used_at = current_kernel_time().tv_sec;
+
+error:
+	put_cred(ctx.cred);
+	return key_ref;
+
+invalid_key:
+	key_ref_put(key_ref);
+	key_ref = ERR_PTR(ret);
+	goto error;
+
+	/* if we attempted to install a keyring, then it may have caused new
+	 * creds to be installed */
+reget_creds:
+	put_cred(ctx.cred);
+	goto try_again;
+}
+
+/*
+ * Join the named keyring as the session keyring if possible else attempt to
+ * create a new one of that name and join that.
+ *
+ * If the name is NULL, an empty anonymous keyring will be installed as the
+ * session keyring.
+ *
+ * Named session keyrings are joined with a semaphore held to prevent the
+ * keyrings from going away whilst the attempt is made to join them and also
+ * to prevent a race in creating compatible session keyrings.
+ */
+long join_session_keyring(const char *name)
+{
+	const struct cred *old;
+	struct cred *new;
+	struct key *keyring;
+	long ret, serial;
+
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+	old = current_cred();
+
+	/* if no name is provided, install an anonymous keyring */
+	if (!name) {
+		ret = install_session_keyring_to_cred(new, NULL);
+		if (ret < 0)
+			goto error;
+
+		serial = new->session_keyring->serial;
+		ret = commit_creds(new);
+		if (ret == 0)
+			ret = serial;
+		goto okay;
+	}
+
+	/* allow the user to join or create a named keyring */
+	mutex_lock(&key_session_mutex);
+
+	/* look for an existing keyring of this name */
+	keyring = find_keyring_by_name(name, false);
+	if (PTR_ERR(keyring) == -ENOKEY) {
+		/* not found - try and create a new one */
+		keyring = keyring_alloc(
+			name, old->uid, old->gid, old,
+			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
+			KEY_ALLOC_IN_QUOTA, NULL);
+		if (IS_ERR(keyring)) {
+			ret = PTR_ERR(keyring);
+			goto error2;
+		}
+	} else if (IS_ERR(keyring)) {
+		ret = PTR_ERR(keyring);
+		goto error2;
+	} else if (keyring == new->session_keyring) {
+		key_put(keyring);
+		ret = 0;
+		goto error2;
+	}
+
+	/* we've got a keyring - now to install it */
+	ret = install_session_keyring_to_cred(new, keyring);
+	if (ret < 0)
+		goto error2;
+
+	commit_creds(new);
+	mutex_unlock(&key_session_mutex);
+
+	ret = keyring->serial;
+	key_put(keyring);
+okay:
+	return ret;
+
+error2:
+	mutex_unlock(&key_session_mutex);
+error:
+	abort_creds(new);
+	return ret;
+}
+
+/*
+ * Replace a process's session keyring on behalf of one of its children when
+ * the target process is about to resume userspace execution.
+ */
+void key_change_session_keyring(struct callback_head *twork)
+{
+	const struct cred *old = current_cred();
+	struct cred *new = container_of(twork, struct cred, rcu);
+
+	if (unlikely(current->flags & PF_EXITING)) {
+		put_cred(new);
+		return;
+	}
+
+	new->  uid	= old->  uid;
+	new-> euid	= old-> euid;
+	new-> suid	= old-> suid;
+	new->fsuid	= old->fsuid;
+	new->  gid	= old->  gid;
+	new-> egid	= old-> egid;
+	new-> sgid	= old-> sgid;
+	new->fsgid	= old->fsgid;
+	new->user	= get_uid(old->user);
+	new->user_ns	= get_user_ns(old->user_ns);
+	new->group_info	= get_group_info(old->group_info);
+
+	new->securebits	= old->securebits;
+	new->cap_inheritable	= old->cap_inheritable;
+	new->cap_permitted	= old->cap_permitted;
+	new->cap_effective	= old->cap_effective;
+	new->cap_ambient	= old->cap_ambient;
+	new->cap_bset		= old->cap_bset;
+
+	new->jit_keyring	= old->jit_keyring;
+	new->thread_keyring	= key_get(old->thread_keyring);
+	new->process_keyring	= key_get(old->process_keyring);
+
+	security_transfer_creds(new, old);
+
+	commit_creds(new);
+}
+
+/*
+ * Make sure that root's user and user-session keyrings exist.
+ */
+static int __init init_root_keyring(void)
+{
+	return install_user_keyrings();
+}
+
+late_initcall(init_root_keyring);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
new file mode 100644
index 0000000..2ce7333
--- /dev/null
+++ b/security/keys/request_key.c
@@ -0,0 +1,725 @@
+/* Request a key from userspace
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * See Documentation/security/keys-request-key.txt
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include <linux/keyctl.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+#define key_negative_timeout	60	/* default timeout on a negative key's existence */
+
+/**
+ * complete_request_key - Complete the construction of a key.
+ * @cons: The key construction record.
+ * @error: The success or failure of the construction.
+ *
+ * Complete the attempt to construct a key.  The key will be negated
+ * if an error is indicated.  The authorisation key will be revoked
+ * unconditionally.
+ */
+void complete_request_key(struct key_construction *cons, int error)
+{
+	kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);
+
+	if (error < 0)
+		key_negate_and_link(cons->key, key_negative_timeout, NULL,
+				    cons->authkey);
+	else
+		key_revoke(cons->authkey);
+
+	key_put(cons->key);
+	key_put(cons->authkey);
+	kfree(cons);
+}
+EXPORT_SYMBOL(complete_request_key);
+
+/*
+ * Initialise a usermode helper that is going to have a specific session
+ * keyring.
+ *
+ * This is called in the context of a freshly forked kthread before
+ * kernel_execve(), so we can simply install the desired session_keyring at
+ * this point.
+ */
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
+{
+	struct key *keyring = info->data;
+
+	return install_session_keyring_to_cred(cred, keyring);
+}
+
+/*
+ * Clean up a usermode helper with session keyring.
+ */
+static void umh_keys_cleanup(struct subprocess_info *info)
+{
+	struct key *keyring = info->data;
+	key_put(keyring);
+}
+
+/*
+ * Call a usermode helper with a specific session keyring.
+ */
+static int call_usermodehelper_keys(char *path, char **argv, char **envp,
+					struct key *session_keyring, int wait)
+{
+	struct subprocess_info *info;
+
+	info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
+					  umh_keys_init, umh_keys_cleanup,
+					  session_keyring);
+	if (!info)
+		return -ENOMEM;
+
+	key_get(session_keyring);
+	return call_usermodehelper_exec(info, wait);
+}
+
+/*
+ * Request userspace finish the construction of a key
+ * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
+ */
+static int call_sbin_request_key(struct key_construction *cons,
+				 const char *op,
+				 void *aux)
+{
+	const struct cred *cred = current_cred();
+	key_serial_t prkey, sskey;
+	struct key *key = cons->key, *authkey = cons->authkey, *keyring,
+		*session;
+	char *argv[9], *envp[3], uid_str[12], gid_str[12];
+	char key_str[12], keyring_str[3][12];
+	char desc[20];
+	int ret, i;
+
+	kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
+
+	ret = install_user_keyrings();
+	if (ret < 0)
+		goto error_alloc;
+
+	/* allocate a new session keyring */
+	sprintf(desc, "_req.%u", key->serial);
+
+	cred = get_current_cred();
+	keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
+				KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+				KEY_ALLOC_QUOTA_OVERRUN, NULL);
+	put_cred(cred);
+	if (IS_ERR(keyring)) {
+		ret = PTR_ERR(keyring);
+		goto error_alloc;
+	}
+
+	/* attach the auth key to the session keyring */
+	ret = key_link(keyring, authkey);
+	if (ret < 0)
+		goto error_link;
+
+	/* record the UID and GID */
+	sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
+	sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));
+
+	/* we say which key is under construction */
+	sprintf(key_str, "%d", key->serial);
+
+	/* we specify the process's default keyrings */
+	sprintf(keyring_str[0], "%d",
+		cred->thread_keyring ? cred->thread_keyring->serial : 0);
+
+	prkey = 0;
+	if (cred->process_keyring)
+		prkey = cred->process_keyring->serial;
+	sprintf(keyring_str[1], "%d", prkey);
+
+	rcu_read_lock();
+	session = rcu_dereference(cred->session_keyring);
+	if (!session)
+		session = cred->user->session_keyring;
+	sskey = session->serial;
+	rcu_read_unlock();
+
+	sprintf(keyring_str[2], "%d", sskey);
+
+	/* set up a minimal environment */
+	i = 0;
+	envp[i++] = "HOME=/";
+	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+	envp[i] = NULL;
+
+	/* set up the argument list */
+	i = 0;
+	argv[i++] = "/sbin/request-key";
+	argv[i++] = (char *) op;
+	argv[i++] = key_str;
+	argv[i++] = uid_str;
+	argv[i++] = gid_str;
+	argv[i++] = keyring_str[0];
+	argv[i++] = keyring_str[1];
+	argv[i++] = keyring_str[2];
+	argv[i] = NULL;
+
+	/* do it */
+	ret = call_usermodehelper_keys(argv[0], argv, envp, keyring,
+				       UMH_WAIT_PROC);
+	kdebug("usermode -> 0x%x", ret);
+	if (ret >= 0) {
+		/* ret is the exit/wait code */
+		if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) ||
+		    key_validate(key) < 0)
+			ret = -ENOKEY;
+		else
+			/* ignore any errors from userspace if the key was
+			 * instantiated */
+			ret = 0;
+	}
+
+error_link:
+	key_put(keyring);
+
+error_alloc:
+	complete_request_key(cons, ret);
+	kleave(" = %d", ret);
+	return ret;
+}
+
+/*
+ * Call out to userspace for key construction.
+ *
+ * Program failure is ignored in favour of key status.
+ */
+static int construct_key(struct key *key, const void *callout_info,
+			 size_t callout_len, void *aux,
+			 struct key *dest_keyring)
+{
+	struct key_construction *cons;
+	request_key_actor_t actor;
+	struct key *authkey;
+	int ret;
+
+	kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
+
+	cons = kmalloc(sizeof(*cons), GFP_KERNEL);
+	if (!cons)
+		return -ENOMEM;
+
+	/* allocate an authorisation key */
+	authkey = request_key_auth_new(key, callout_info, callout_len,
+				       dest_keyring);
+	if (IS_ERR(authkey)) {
+		kfree(cons);
+		ret = PTR_ERR(authkey);
+		authkey = NULL;
+	} else {
+		cons->authkey = key_get(authkey);
+		cons->key = key_get(key);
+
+		/* make the call */
+		actor = call_sbin_request_key;
+		if (key->type->request_key)
+			actor = key->type->request_key;
+
+		ret = actor(cons, "create", aux);
+
+		/* check that the actor called complete_request_key() prior to
+		 * returning an error */
+		WARN_ON(ret < 0 &&
+			!test_bit(KEY_FLAG_REVOKED, &authkey->flags));
+		key_put(authkey);
+	}
+
+	kleave(" = %d", ret);
+	return ret;
+}
+
+/*
+ * Get the appropriate destination keyring for the request.
+ *
+ * The keyring selected is returned with an extra reference upon it which the
+ * caller must release.
+ */
+static void construct_get_dest_keyring(struct key **_dest_keyring)
+{
+	struct request_key_auth *rka;
+	const struct cred *cred = current_cred();
+	struct key *dest_keyring = *_dest_keyring, *authkey;
+
+	kenter("%p", dest_keyring);
+
+	/* find the appropriate keyring */
+	if (dest_keyring) {
+		/* the caller supplied one */
+		key_get(dest_keyring);
+	} else {
+		/* use a default keyring; falling through the cases until we
+		 * find one that we actually have */
+		switch (cred->jit_keyring) {
+		case KEY_REQKEY_DEFL_DEFAULT:
+		case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
+			if (cred->request_key_auth) {
+				authkey = cred->request_key_auth;
+				down_read(&authkey->sem);
+				rka = authkey->payload.data[0];
+				if (!test_bit(KEY_FLAG_REVOKED,
+					      &authkey->flags))
+					dest_keyring =
+						key_get(rka->dest_keyring);
+				up_read(&authkey->sem);
+				if (dest_keyring)
+					break;
+			}
+
+		case KEY_REQKEY_DEFL_THREAD_KEYRING:
+			dest_keyring = key_get(cred->thread_keyring);
+			if (dest_keyring)
+				break;
+
+		case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+			dest_keyring = key_get(cred->process_keyring);
+			if (dest_keyring)
+				break;
+
+		case KEY_REQKEY_DEFL_SESSION_KEYRING:
+			rcu_read_lock();
+			dest_keyring = key_get(
+				rcu_dereference(cred->session_keyring));
+			rcu_read_unlock();
+
+			if (dest_keyring)
+				break;
+
+		case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+			dest_keyring =
+				key_get(cred->user->session_keyring);
+			break;
+
+		case KEY_REQKEY_DEFL_USER_KEYRING:
+			dest_keyring = key_get(cred->user->uid_keyring);
+			break;
+
+		case KEY_REQKEY_DEFL_GROUP_KEYRING:
+		default:
+			BUG();
+		}
+	}
+
+	*_dest_keyring = dest_keyring;
+	kleave(" [dk %d]", key_serial(dest_keyring));
+	return;
+}
+
+/*
+ * Allocate a new key in under-construction state and attempt to link it in to
+ * the requested keyring.
+ *
+ * May return a key that's already under construction instead if there was a
+ * race between two threads calling request_key().
+ */
+static int construct_alloc_key(struct keyring_search_context *ctx,
+			       struct key *dest_keyring,
+			       unsigned long flags,
+			       struct key_user *user,
+			       struct key **_key)
+{
+	struct assoc_array_edit *edit;
+	struct key *key;
+	key_perm_t perm;
+	key_ref_t key_ref;
+	int ret;
+
+	kenter("%s,%s,,,",
+	       ctx->index_key.type->name, ctx->index_key.description);
+
+	*_key = NULL;
+	mutex_lock(&user->cons_lock);
+
+	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+	perm |= KEY_USR_VIEW;
+	if (ctx->index_key.type->read)
+		perm |= KEY_POS_READ;
+	if (ctx->index_key.type == &key_type_keyring ||
+	    ctx->index_key.type->update)
+		perm |= KEY_POS_WRITE;
+
+	key = key_alloc(ctx->index_key.type, ctx->index_key.description,
+			ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
+			perm, flags);
+	if (IS_ERR(key))
+		goto alloc_failed;
+
+	set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
+
+	if (dest_keyring) {
+		ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
+		if (ret < 0)
+			goto link_prealloc_failed;
+	}
+
+	/* attach the key to the destination keyring under lock, but we do need
+	 * to do another check just in case someone beat us to it whilst we
+	 * waited for locks */
+	mutex_lock(&key_construction_mutex);
+
+	key_ref = search_process_keyrings(ctx);
+	if (!IS_ERR(key_ref))
+		goto key_already_present;
+
+	if (dest_keyring)
+		__key_link(key, &edit);
+
+	mutex_unlock(&key_construction_mutex);
+	if (dest_keyring)
+		__key_link_end(dest_keyring, &ctx->index_key, edit);
+	mutex_unlock(&user->cons_lock);
+	*_key = key;
+	kleave(" = 0 [%d]", key_serial(key));
+	return 0;
+
+	/* the key is now present - we tell the caller that we found it by
+	 * returning -EINPROGRESS  */
+key_already_present:
+	key_put(key);
+	mutex_unlock(&key_construction_mutex);
+	key = key_ref_to_ptr(key_ref);
+	if (dest_keyring) {
+		ret = __key_link_check_live_key(dest_keyring, key);
+		if (ret == 0)
+			__key_link(key, &edit);
+		__key_link_end(dest_keyring, &ctx->index_key, edit);
+		if (ret < 0)
+			goto link_check_failed;
+	}
+	mutex_unlock(&user->cons_lock);
+	*_key = key;
+	kleave(" = -EINPROGRESS [%d]", key_serial(key));
+	return -EINPROGRESS;
+
+link_check_failed:
+	mutex_unlock(&user->cons_lock);
+	key_put(key);
+	kleave(" = %d [linkcheck]", ret);
+	return ret;
+
+link_prealloc_failed:
+	mutex_unlock(&user->cons_lock);
+	key_put(key);
+	kleave(" = %d [prelink]", ret);
+	return ret;
+
+alloc_failed:
+	mutex_unlock(&user->cons_lock);
+	kleave(" = %ld", PTR_ERR(key));
+	return PTR_ERR(key);
+}
+
+/*
+ * Commence key construction.
+ */
+static struct key *construct_key_and_link(struct keyring_search_context *ctx,
+					  const char *callout_info,
+					  size_t callout_len,
+					  void *aux,
+					  struct key *dest_keyring,
+					  unsigned long flags)
+{
+	struct key_user *user;
+	struct key *key;
+	int ret;
+
+	kenter("");
+
+	if (ctx->index_key.type == &key_type_keyring)
+		return ERR_PTR(-EPERM);
+
+	user = key_user_lookup(current_fsuid());
+	if (!user)
+		return ERR_PTR(-ENOMEM);
+
+	construct_get_dest_keyring(&dest_keyring);
+
+	ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
+	key_user_put(user);
+
+	if (ret == 0) {
+		ret = construct_key(key, callout_info, callout_len, aux,
+				    dest_keyring);
+		if (ret < 0) {
+			kdebug("cons failed");
+			goto construction_failed;
+		}
+	} else if (ret == -EINPROGRESS) {
+		ret = 0;
+	} else {
+		goto couldnt_alloc_key;
+	}
+
+	key_put(dest_keyring);
+	kleave(" = key %d", key_serial(key));
+	return key;
+
+construction_failed:
+	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
+	key_put(key);
+couldnt_alloc_key:
+	key_put(dest_keyring);
+	kleave(" = %d", ret);
+	return ERR_PTR(ret);
+}
+
+/**
+ * request_key_and_link - Request a key and cache it in a keyring.
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ * @dest_keyring: Where to cache the key.
+ * @flags: Flags to key_alloc().
+ *
+ * A key matching the specified criteria is searched for in the process's
+ * keyrings and returned with its usage count incremented if found.  Otherwise,
+ * if callout_info is not NULL, a key will be allocated and some service
+ * (probably in userspace) will be asked to instantiate it.
+ *
+ * If successfully found or created, the key will be linked to the destination
+ * keyring if one is provided.
+ *
+ * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
+ * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
+ * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
+ * if insufficient key quota was available to create a new key; or -ENOMEM if
+ * insufficient memory was available.
+ *
+ * If the returned key was created, then it may still be under construction,
+ * and wait_for_key_construction() should be used to wait for that to complete.
+ */
+struct key *request_key_and_link(struct key_type *type,
+				 const char *description,
+				 const void *callout_info,
+				 size_t callout_len,
+				 void *aux,
+				 struct key *dest_keyring,
+				 unsigned long flags)
+{
+	struct keyring_search_context ctx = {
+		.index_key.type		= type,
+		.index_key.description	= description,
+		.cred			= current_cred(),
+		.match_data.cmp		= key_default_cmp,
+		.match_data.raw_data	= description,
+		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
+		.flags			= (KEYRING_SEARCH_DO_STATE_CHECK |
+					   KEYRING_SEARCH_SKIP_EXPIRED),
+	};
+	struct key *key;
+	key_ref_t key_ref;
+	int ret;
+
+	kenter("%s,%s,%p,%zu,%p,%p,%lx",
+	       ctx.index_key.type->name, ctx.index_key.description,
+	       callout_info, callout_len, aux, dest_keyring, flags);
+
+	if (type->match_preparse) {
+		ret = type->match_preparse(&ctx.match_data);
+		if (ret < 0) {
+			key = ERR_PTR(ret);
+			goto error;
+		}
+	}
+
+	/* search all the process keyrings for a key */
+	key_ref = search_process_keyrings(&ctx);
+
+	if (!IS_ERR(key_ref)) {
+		key = key_ref_to_ptr(key_ref);
+		if (dest_keyring) {
+			construct_get_dest_keyring(&dest_keyring);
+			ret = key_link(dest_keyring, key);
+			key_put(dest_keyring);
+			if (ret < 0) {
+				key_put(key);
+				key = ERR_PTR(ret);
+				goto error_free;
+			}
+		}
+	} else if (PTR_ERR(key_ref) != -EAGAIN) {
+		key = ERR_CAST(key_ref);
+	} else {
+		/* the search failed, but the keyrings were searchable, so we
+		 * should consult userspace if we can */
+		key = ERR_PTR(-ENOKEY);
+		if (!callout_info)
+			goto error_free;
+
+		key = construct_key_and_link(&ctx, callout_info, callout_len,
+					     aux, dest_keyring, flags);
+	}
+
+error_free:
+	if (type->match_free)
+		type->match_free(&ctx.match_data);
+error:
+	kleave(" = %p", key);
+	return key;
+}
+
+/**
+ * wait_for_key_construction - Wait for construction of a key to complete
+ * @key: The key being waited for.
+ * @intr: Whether to wait interruptibly.
+ *
+ * Wait for a key to finish being constructed.
+ *
+ * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
+ * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
+ * revoked or expired.
+ */
+int wait_for_key_construction(struct key *key, bool intr)
+{
+	int ret;
+
+	ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
+			  intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+	if (ret)
+		return -ERESTARTSYS;
+	ret = key_read_state(key);
+	if (ret < 0)
+		return ret;
+	return key_validate(key);
+}
+EXPORT_SYMBOL(wait_for_key_construction);
+
+/**
+ * request_key - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota,
+ * the callout_info must be a NUL-terminated string and no auxiliary data can
+ * be passed.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
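+ *
+ * An illustrative call, assuming the "user" key type defined elsewhere in
+ * this subsystem:
+ *
+ *	struct key *key = request_key(&key_type_user, "user:debug", NULL);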
+ */
+struct key *request_key(struct key_type *type,
+			const char *description,
+			const char *callout_info)
+{
+	struct key *key;
+	size_t callout_len = 0;
+	int ret;
+
+	if (callout_info)
+		callout_len = strlen(callout_info);
+	key = request_key_and_link(type, description, callout_info, callout_len,
+				   NULL, NULL, KEY_ALLOC_IN_QUOTA);
+	if (!IS_ERR(key)) {
+		ret = wait_for_key_construction(key, false);
+		if (ret < 0) {
+			key_put(key);
+			return ERR_PTR(ret);
+		}
+	}
+	return key;
+}
+EXPORT_SYMBOL(request_key);
+
+/**
+ * request_key_with_auxdata - Request a key with auxiliary data for the upcaller
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
+ */
+struct key *request_key_with_auxdata(struct key_type *type,
+				     const char *description,
+				     const void *callout_info,
+				     size_t callout_len,
+				     void *aux)
+{
+	struct key *key;
+	int ret;
+
+	key = request_key_and_link(type, description, callout_info, callout_len,
+				   aux, NULL, KEY_ALLOC_IN_QUOTA);
+	if (!IS_ERR(key)) {
+		ret = wait_for_key_construction(key, false);
+		if (ret < 0) {
+			key_put(key);
+			return ERR_PTR(ret);
+		}
+	}
+	return key;
+}
+EXPORT_SYMBOL(request_key_with_auxdata);
+
+/*
+ * request_key_async - Request a key (allow async construction)
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota and
+ * no auxiliary data can be passed.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
+ */
+struct key *request_key_async(struct key_type *type,
+			      const char *description,
+			      const void *callout_info,
+			      size_t callout_len)
+{
+	return request_key_and_link(type, description, callout_info,
+				    callout_len, NULL, NULL,
+				    KEY_ALLOC_IN_QUOTA);
+}
+EXPORT_SYMBOL(request_key_async);
+
+/*
+ * request_key_async_with_auxdata - Request a key with auxiliary data for the upcaller (allow async construction)
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
+ */
+struct key *request_key_async_with_auxdata(struct key_type *type,
+					   const char *description,
+					   const void *callout_info,
+					   size_t callout_len,
+					   void *aux)
+{
+	return request_key_and_link(type, description, callout_info,
+				    callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA);
+}
+EXPORT_SYMBOL(request_key_async_with_auxdata);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
new file mode 100644
index 0000000..217775f
--- /dev/null
+++ b/security/keys/request_key_auth.c
@@ -0,0 +1,276 @@
+/* Request key authorisation token key definition.
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * See Documentation/security/keys-request-key.txt
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+#include <keys/user-type.h>
+
+static int request_key_auth_preparse(struct key_preparsed_payload *);
+static void request_key_auth_free_preparse(struct key_preparsed_payload *);
+static int request_key_auth_instantiate(struct key *,
+					struct key_preparsed_payload *);
+static void request_key_auth_describe(const struct key *, struct seq_file *);
+static void request_key_auth_revoke(struct key *);
+static void request_key_auth_destroy(struct key *);
+static long request_key_auth_read(const struct key *, char __user *, size_t);
+
+/*
+ * The request-key authorisation key type definition.
+ */
+struct key_type key_type_request_key_auth = {
+	.name		= ".request_key_auth",
+	.def_datalen	= sizeof(struct request_key_auth),
+	.preparse	= request_key_auth_preparse,
+	.free_preparse	= request_key_auth_free_preparse,
+	.instantiate	= request_key_auth_instantiate,
+	.describe	= request_key_auth_describe,
+	.revoke		= request_key_auth_revoke,
+	.destroy	= request_key_auth_destroy,
+	.read		= request_key_auth_read,
+};
+
+static int request_key_auth_preparse(struct key_preparsed_payload *prep)
+{
+	return 0;
+}
+
+static void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
+{
+}
+
+/*
+ * Instantiate a request-key authorisation key.
+ */
+static int request_key_auth_instantiate(struct key *key,
+					struct key_preparsed_payload *prep)
+{
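+	/* the payload was built by request_key_auth_new(); just attach it */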
+	key->payload.data[0] = (struct request_key_auth *)prep->data;
+	return 0;
+}
+
+/*
+ * Describe an authorisation token.
+ */
+static void request_key_auth_describe(const struct key *key,
+				      struct seq_file *m)
+{
+	struct request_key_auth *rka = key->payload.data[0];
+
+	seq_puts(m, "key:");
+	seq_puts(m, key->description);
+	if (key_is_positive(key))
+		seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+}
+
+/*
+ * Read the callout_info data (retrieves the callout information).
+ * - the key's semaphore is read-locked
+ */
+static long request_key_auth_read(const struct key *key,
+				  char __user *buffer, size_t buflen)
+{
+	struct request_key_auth *rka = key->payload.data[0];
+	size_t datalen;
+	long ret;
+
+	datalen = rka->callout_len;
+	ret = datalen;
+
+	/* we can return the data as is */
+	if (buffer && buflen > 0) {
+		if (buflen > datalen)
+			buflen = datalen;
+
+		if (copy_to_user(buffer, rka->callout_info, buflen) != 0)
+			ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+/*
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
+ */
+static void request_key_auth_revoke(struct key *key)
+{
+	struct request_key_auth *rka = key->payload.data[0];
+
+	kenter("{%d}", key->serial);
+
+	if (rka->cred) {
+		put_cred(rka->cred);
+		rka->cred = NULL;
+	}
+}
+
+/*
+ * Destroy an instantiation authorisation token key.
+ */
+static void request_key_auth_destroy(struct key *key)
+{
+	struct request_key_auth *rka = key->payload.data[0];
+
+	kenter("{%d}", key->serial);
+
+	if (rka->cred) {
+		put_cred(rka->cred);
+		rka->cred = NULL;
+	}
+
+	key_put(rka->target_key);
+	key_put(rka->dest_keyring);
+	kfree(rka->callout_info);
+	kfree(rka);
+}
+
+/*
+ * Create an authorisation token for /sbin/request-key or whoever to gain
+ * access to the caller's security data.
+ */
+struct key *request_key_auth_new(struct key *target, const void *callout_info,
+				 size_t callout_len, struct key *dest_keyring)
+{
+	struct request_key_auth *rka, *irka;
+	const struct cred *cred = current->cred;
+	struct key *authkey = NULL;
+	char desc[20];
+	int ret;
+
+	kenter("%d,", target->serial);
+
+	/* allocate an auth record */
+	rka = kmalloc(sizeof(*rka), GFP_KERNEL);
+	if (!rka) {
+		kleave(" = -ENOMEM");
+		return ERR_PTR(-ENOMEM);
+	}
+	rka->callout_info = kmalloc(callout_len, GFP_KERNEL);
+	if (!rka->callout_info) {
+		kleave(" = -ENOMEM");
+		kfree(rka);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* see if the calling process is already servicing the key request of
+	 * another process */
+	if (cred->request_key_auth) {
+		/* it is - use that instantiation context here too */
+		down_read(&cred->request_key_auth->sem);
+
+		/* if the auth key has been revoked, then the key we're
+		 * servicing is already instantiated */
+		if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags))
+			goto auth_key_revoked;
+
+		irka = cred->request_key_auth->payload.data[0];
+		rka->cred = get_cred(irka->cred);
+		rka->pid = irka->pid;
+
+		up_read(&cred->request_key_auth->sem);
+	} else {
+		/* it isn't - use this process as the context */
+		rka->cred = get_cred(cred);
+		rka->pid = current->pid;
+	}
+
+	rka->target_key = key_get(target);
+	rka->dest_keyring = key_get(dest_keyring);
+	memcpy(rka->callout_info, callout_info, callout_len);
+	rka->callout_len = callout_len;
+
+	/* allocate the auth key */
+	sprintf(desc, "%x", target->serial);
+
+	authkey = key_alloc(&key_type_request_key_auth, desc,
+			    cred->fsuid, cred->fsgid, cred,
+			    KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
+			    KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA);
+	if (IS_ERR(authkey)) {
+		ret = PTR_ERR(authkey);
+		goto error_alloc;
+	}
+
+	/* construct the auth key */
+	ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
+	if (ret < 0)
+		goto error_inst;
+
+	kleave(" = {%d,%d}", authkey->serial, atomic_read(&authkey->usage));
+	return authkey;
+
+auth_key_revoked:
+	up_read(&cred->request_key_auth->sem);
+	kfree(rka->callout_info);
+	kfree(rka);
+	kleave("= -EKEYREVOKED");
+	return ERR_PTR(-EKEYREVOKED);
+
+error_inst:
+	key_revoke(authkey);
+	key_put(authkey);
+error_alloc:
+	key_put(rka->target_key);
+	key_put(rka->dest_keyring);
+	kfree(rka->callout_info);
+	kfree(rka);
+	kleave("= %d", ret);
+	return ERR_PTR(ret);
+}
+
+/*
+ * Search the current process's keyrings for the authorisation key for
+ * instantiation of a key.
+ */
+struct key *key_get_instantiation_authkey(key_serial_t target_id)
+{
+	char description[16];
+	struct keyring_search_context ctx = {
+		.index_key.type		= &key_type_request_key_auth,
+		.index_key.description	= description,
+		.cred			= current_cred(),
+		.match_data.cmp		= key_default_cmp,
+		.match_data.raw_data	= description,
+		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
+		.flags			= KEYRING_SEARCH_DO_STATE_CHECK,
+	};
+	struct key *authkey;
+	key_ref_t authkey_ref;
+
+	sprintf(description, "%x", target_id);
+
+	authkey_ref = search_process_keyrings(&ctx);
+
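+	/* the search reports "not found" as -EAGAIN; convert that to -ENOKEY */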
+	if (IS_ERR(authkey_ref)) {
+		authkey = ERR_CAST(authkey_ref);
+		if (authkey == ERR_PTR(-EAGAIN))
+			authkey = ERR_PTR(-ENOKEY);
+		goto error;
+	}
+
+	authkey = key_ref_to_ptr(authkey_ref);
+	if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
+		key_put(authkey);
+		authkey = ERR_PTR(-EKEYREVOKED);
+	}
+
+error:
+	return authkey;
+}
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
new file mode 100644
index 0000000..b68faa1
--- /dev/null
+++ b/security/keys/sysctl.c
@@ -0,0 +1,76 @@
+/* Key management controls
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/key.h>
+#include <linux/sysctl.h>
+#include "internal.h"
+
+static const int zero, one = 1, max = INT_MAX;
+
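+/* these appear under /proc/sys/kernel/keys/ once the table is registered */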
+struct ctl_table key_sysctls[] = {
+	{
+		.procname = "maxkeys",
+		.data = &key_quota_maxkeys,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = (void *) &one,
+		.extra2 = (void *) &max,
+	},
+	{
+		.procname = "maxbytes",
+		.data = &key_quota_maxbytes,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = (void *) &one,
+		.extra2 = (void *) &max,
+	},
+	{
+		.procname = "root_maxkeys",
+		.data = &key_quota_root_maxkeys,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = (void *) &one,
+		.extra2 = (void *) &max,
+	},
+	{
+		.procname = "root_maxbytes",
+		.data = &key_quota_root_maxbytes,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = (void *) &one,
+		.extra2 = (void *) &max,
+	},
+	{
+		.procname = "gc_delay",
+		.data = &key_gc_delay,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = (void *) &zero,
+		.extra2 = (void *) &max,
+	},
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+	{
+		.procname = "persistent_keyring_expiry",
+		.data = &persistent_keyring_expiry,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = (void *) &zero,
+		.extra2 = (void *) &max,
+	},
+#endif
+	{ }
+};
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
new file mode 100644
index 0000000..214ae2d
--- /dev/null
+++ b/security/keys/trusted.c
@@ -0,0 +1,1190 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Author:
+ * David Safford <safford@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <linux/key-type.h>
+#include <linux/rcupdate.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <linux/capability.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include "trusted.h"
+
+static const char hmac_alg[] = "hmac(sha1)";
+static const char hash_alg[] = "sha1";
+
+struct sdesc {
+	struct shash_desc shash;
+	char ctx[];
+};
+
+static struct crypto_shash *hashalg;
+static struct crypto_shash *hmacalg;
+
+static struct sdesc *init_sdesc(struct crypto_shash *alg)
+{
+	struct sdesc *sdesc;
+	int size;
+
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+	sdesc = kmalloc(size, GFP_KERNEL);
+	if (!sdesc)
+		return ERR_PTR(-ENOMEM);
+	sdesc->shash.tfm = alg;
+	sdesc->shash.flags = 0x0;
+	return sdesc;
+}
+
+static int TSS_sha1(const unsigned char *data, unsigned int datalen,
+		    unsigned char *digest)
+{
+	struct sdesc *sdesc;
+	int ret;
+
+	sdesc = init_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
+	kzfree(sdesc);
+	return ret;
+}
+
+static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
+		       unsigned int keylen, ...)
+{
+	struct sdesc *sdesc;
+	va_list argp;
+	unsigned int dlen;
+	unsigned char *data;
+	int ret;
+
+	sdesc = init_sdesc(hmacalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hmac_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_setkey(hmacalg, key, keylen);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_init(&sdesc->shash);
+	if (ret < 0)
+		goto out;
+
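+	/* the variadic tail is (length, data) pairs terminated by a zero length */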
+	va_start(argp, keylen);
+	for (;;) {
+		dlen = va_arg(argp, unsigned int);
+		if (dlen == 0)
+			break;
+		data = va_arg(argp, unsigned char *);
+		if (data == NULL) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = crypto_shash_update(&sdesc->shash, data, dlen);
+		if (ret < 0)
+			break;
+	}
+	va_end(argp);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, digest);
+out:
+	kzfree(sdesc);
+	return ret;
+}
+
+/*
+ * calculate authorization info fields to send to TPM
+ */
+static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+			unsigned int keylen, unsigned char *h1,
+			unsigned char *h2, unsigned char h3, ...)
+{
+	unsigned char paramdigest[SHA1_DIGEST_SIZE];
+	struct sdesc *sdesc;
+	unsigned int dlen;
+	unsigned char *data;
+	unsigned char c;
+	int ret;
+	va_list argp;
+
+	sdesc = init_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	c = h3;
+	ret = crypto_shash_init(&sdesc->shash);
+	if (ret < 0)
+		goto out;
+	va_start(argp, h3);
+	for (;;) {
+		dlen = va_arg(argp, unsigned int);
+		if (dlen == 0)
+			break;
+		data = va_arg(argp, unsigned char *);
+		if (!data) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = crypto_shash_update(&sdesc->shash, data, dlen);
+		if (ret < 0)
+			break;
+	}
+	va_end(argp);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (!ret)
+		ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
+				  paramdigest, TPM_NONCE_SIZE, h1,
+				  TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
+out:
+	kzfree(sdesc);
+	return ret;
+}
+
+/*
+ * verify the AUTH1_COMMAND (Seal) result from TPM
+ */
+static int TSS_checkhmac1(unsigned char *buffer,
+			  const uint32_t command,
+			  const unsigned char *ononce,
+			  const unsigned char *key,
+			  unsigned int keylen, ...)
+{
+	uint32_t bufsize;
+	uint16_t tag;
+	uint32_t ordinal;
+	uint32_t result;
+	unsigned char *enonce;
+	unsigned char *continueflag;
+	unsigned char *authdata;
+	unsigned char testhmac[SHA1_DIGEST_SIZE];
+	unsigned char paramdigest[SHA1_DIGEST_SIZE];
+	struct sdesc *sdesc;
+	unsigned int dlen;
+	unsigned int dpos;
+	va_list argp;
+	int ret;
+
+	bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+	tag = LOAD16(buffer, 0);
+	ordinal = command;
+	result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+	if (tag == TPM_TAG_RSP_COMMAND)
+		return 0;
+	if (tag != TPM_TAG_RSP_AUTH1_COMMAND)
+		return -EINVAL;
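+	/* the response ends with: even nonce, continue-session flag byte,
+	 * then the SHA-1 HMAC over the reply parameters */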
+	authdata = buffer + bufsize - SHA1_DIGEST_SIZE;
+	continueflag = authdata - 1;
+	enonce = continueflag - TPM_NONCE_SIZE;
+
+	sdesc = init_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+	ret = crypto_shash_init(&sdesc->shash);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+				  sizeof result);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+				  sizeof ordinal);
+	if (ret < 0)
+		goto out;
+	va_start(argp, keylen);
+	for (;;) {
+		dlen = va_arg(argp, unsigned int);
+		if (dlen == 0)
+			break;
+		dpos = va_arg(argp, unsigned int);
+		ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+		if (ret < 0)
+			break;
+	}
+	va_end(argp);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (ret < 0)
+		goto out;
+
+	ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest,
+			  TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce,
+			  1, continueflag, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
+		ret = -EINVAL;
+out:
+	kzfree(sdesc);
+	return ret;
+}
+
+/*
+ * verify the AUTH2_COMMAND (unseal) result from TPM
+ */
+static int TSS_checkhmac2(unsigned char *buffer,
+			  const uint32_t command,
+			  const unsigned char *ononce,
+			  const unsigned char *key1,
+			  unsigned int keylen1,
+			  const unsigned char *key2,
+			  unsigned int keylen2, ...)
+{
+	uint32_t bufsize;
+	uint16_t tag;
+	uint32_t ordinal;
+	uint32_t result;
+	unsigned char *enonce1;
+	unsigned char *continueflag1;
+	unsigned char *authdata1;
+	unsigned char *enonce2;
+	unsigned char *continueflag2;
+	unsigned char *authdata2;
+	unsigned char testhmac1[SHA1_DIGEST_SIZE];
+	unsigned char testhmac2[SHA1_DIGEST_SIZE];
+	unsigned char paramdigest[SHA1_DIGEST_SIZE];
+	struct sdesc *sdesc;
+	unsigned int dlen;
+	unsigned int dpos;
+	va_list argp;
+	int ret;
+
+	bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+	tag = LOAD16(buffer, 0);
+	ordinal = command;
+	result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+
+	if (tag == TPM_TAG_RSP_COMMAND)
+		return 0;
+	if (tag != TPM_TAG_RSP_AUTH2_COMMAND)
+		return -EINVAL;
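+	/* an AUTH2 response carries two trailers, one per session, each laid
+	 * out as: even nonce, continue-session flag byte, SHA-1 HMAC */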
+	authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1
+			+ SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE);
+	authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE);
+	continueflag1 = authdata1 - 1;
+	continueflag2 = authdata2 - 1;
+	enonce1 = continueflag1 - TPM_NONCE_SIZE;
+	enonce2 = continueflag2 - TPM_NONCE_SIZE;
+
+	sdesc = init_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+	ret = crypto_shash_init(&sdesc->shash);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+				  sizeof result);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+				  sizeof ordinal);
+	if (ret < 0)
+		goto out;
+
+	va_start(argp, keylen2);
+	for (;;) {
+		dlen = va_arg(argp, unsigned int);
+		if (dlen == 0)
+			break;
+		dpos = va_arg(argp, unsigned int);
+		ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+		if (ret < 0)
+			break;
+	}
+	va_end(argp);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (ret < 0)
+		goto out;
+
+	ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE,
+			  paramdigest, TPM_NONCE_SIZE, enonce1,
+			  TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE,
+			  paramdigest, TPM_NONCE_SIZE, enonce2,
+			  TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
+		ret = -EINVAL;
+out:
+	kzfree(sdesc);
+	return ret;
+}
+
+/*
+ * For key-specific TPM requests, we generate and send our own TPM command
+ * packets using the driver's send function.
+ */
+static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd,
+			    size_t buflen)
+{
+	int rc;
+
+	dump_tpm_buf(cmd);
+	rc = tpm_send(chip_num, cmd, buflen);
+	dump_tpm_buf(cmd);
+	if (rc > 0)
+		/* Can't return positive return code values to keyctl */
+		rc = -EPERM;
+	return rc;
+}
+
+/*
+ * Lock a trusted key, by extending a selected PCR.
+ *
+ * Prevents a trusted key that is sealed to PCRs from being accessed.
+ * This uses the tpm driver's extend function.
+ */
+static int pcrlock(const int pcrnum)
+{
+	unsigned char hash[SHA1_DIGEST_SIZE];
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE);
+	if (ret != SHA1_DIGEST_SIZE)
+		return ret;
+	return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? -EINVAL : 0;
+}
+
+/*
+ * Create an object specific authorisation protocol (OSAP) session
+ */
+static int osap(struct tpm_buf *tb, struct osapsess *s,
+		const unsigned char *key, uint16_t type, uint32_t handle)
+{
+	unsigned char enonce[TPM_NONCE_SIZE];
+	unsigned char ononce[TPM_NONCE_SIZE];
+	int ret;
+
+	ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE);
+	if (ret != TPM_NONCE_SIZE)
+		return ret;
+
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_COMMAND);
+	store32(tb, TPM_OSAP_SIZE);
+	store32(tb, TPM_ORD_OSAP);
+	store16(tb, type);
+	store32(tb, handle);
+	storebytes(tb, ononce, TPM_NONCE_SIZE);
+
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+	if (ret < 0)
+		return ret;
+
+	s->handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+	memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]),
+	       TPM_NONCE_SIZE);
+	memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) +
+				  TPM_NONCE_SIZE]), TPM_NONCE_SIZE);
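+	/* the OSAP shared secret is HMAC(key, even nonce || odd nonce) */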
+	return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE,
+			   enonce, TPM_NONCE_SIZE, ononce, 0, 0);
+}
+
+/*
+ * Create an object independent authorisation protocol (OIAP) session
+ */
+static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
+{
+	int ret;
+
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_COMMAND);
+	store32(tb, TPM_OIAP_SIZE);
+	store32(tb, TPM_ORD_OIAP);
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+	if (ret < 0)
+		return ret;
+
+	*handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+	memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)],
+	       TPM_NONCE_SIZE);
+	return 0;
+}
+
+struct tpm_digests {
+	unsigned char encauth[SHA1_DIGEST_SIZE];
+	unsigned char pubauth[SHA1_DIGEST_SIZE];
+	unsigned char xorwork[SHA1_DIGEST_SIZE * 2];
+	unsigned char xorhash[SHA1_DIGEST_SIZE];
+	unsigned char nonceodd[TPM_NONCE_SIZE];
+};
+
+/*
+ * Have the TPM seal (encrypt) the trusted key, possibly based on
+ * Platform Configuration Registers (PCRs). AUTH1 for sealing key.
+ */
+static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
+		    uint32_t keyhandle, const unsigned char *keyauth,
+		    const unsigned char *data, uint32_t datalen,
+		    unsigned char *blob, uint32_t *bloblen,
+		    const unsigned char *blobauth,
+		    const unsigned char *pcrinfo, uint32_t pcrinfosize)
+{
+	struct osapsess sess;
+	struct tpm_digests *td;
+	unsigned char cont;
+	uint32_t ordinal;
+	uint32_t pcrsize;
+	uint32_t datsize;
+	int sealinfosize;
+	int encdatasize;
+	int storedsize;
+	int ret;
+	int i;
+
+	/* alloc some work space for all the hashes */
+	td = kmalloc(sizeof *td, GFP_KERNEL);
+	if (!td)
+		return -ENOMEM;
+
+	/* get session for sealing key */
+	ret = osap(tb, &sess, keyauth, keytype, keyhandle);
+	if (ret < 0)
+		goto out;
+	dump_sess(&sess);
+
+	/* calculate encrypted authorization value */
+	memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE);
+	memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
+	ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
+	if (ret < 0)
+		goto out;
+
+	ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE);
+	if (ret != TPM_NONCE_SIZE)
+		goto out;
+	ordinal = htonl(TPM_ORD_SEAL);
+	datsize = htonl(datalen);
+	pcrsize = htonl(pcrinfosize);
+	cont = 0;
+
+	/* encrypt data authorization key */
+	for (i = 0; i < SHA1_DIGEST_SIZE; ++i)
+		td->encauth[i] = td->xorhash[i] ^ blobauth[i];
+
+	/* calculate authorization HMAC value */
+	if (pcrinfosize == 0) {
+		/* no pcr info specified */
+		ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+				   sess.enonce, td->nonceodd, cont,
+				   sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+				   td->encauth, sizeof(uint32_t), &pcrsize,
+				   sizeof(uint32_t), &datsize, datalen, data, 0,
+				   0);
+	} else {
+		/* pcr info specified */
+		ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+				   sess.enonce, td->nonceodd, cont,
+				   sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+				   td->encauth, sizeof(uint32_t), &pcrsize,
+				   pcrinfosize, pcrinfo, sizeof(uint32_t),
+				   &datsize, datalen, data, 0, 0);
+	}
+	if (ret < 0)
+		goto out;
+
+	/* build and send the TPM request packet */
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+	store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen);
+	store32(tb, TPM_ORD_SEAL);
+	store32(tb, keyhandle);
+	storebytes(tb, td->encauth, SHA1_DIGEST_SIZE);
+	store32(tb, pcrinfosize);
+	storebytes(tb, pcrinfo, pcrinfosize);
+	store32(tb, datalen);
+	storebytes(tb, data, datalen);
+	store32(tb, sess.handle);
+	storebytes(tb, td->nonceodd, TPM_NONCE_SIZE);
+	store8(tb, cont);
+	storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
+
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+	if (ret < 0)
+		goto out;
+
+	/* calculate the size of the returned Blob */
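+	/* (a TPM_STORED_DATA bundle: version word, sealinfo length and data,
+	 *  then encdata length and data) */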
+	sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
+	encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) +
+			     sizeof(uint32_t) + sealinfosize);
+	storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize +
+	    sizeof(uint32_t) + encdatasize;
+
+	/* check the HMAC in the response */
+	ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret,
+			     SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0,
+			     0);
+
+	/* copy the returned blob to caller */
+	if (!ret) {
+		memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
+		*bloblen = storedsize;
+	}
+out:
+	kzfree(td);
+	return ret;
+}
+
+/*
+ * use the AUTH2_COMMAND form of unseal to authorize both the key and the blob
+ */
+static int tpm_unseal(struct tpm_buf *tb,
+		      uint32_t keyhandle, const unsigned char *keyauth,
+		      const unsigned char *blob, int bloblen,
+		      const unsigned char *blobauth,
+		      unsigned char *data, unsigned int *datalen)
+{
+	unsigned char nonceodd[TPM_NONCE_SIZE];
+	unsigned char enonce1[TPM_NONCE_SIZE];
+	unsigned char enonce2[TPM_NONCE_SIZE];
+	unsigned char authdata1[SHA1_DIGEST_SIZE];
+	unsigned char authdata2[SHA1_DIGEST_SIZE];
+	uint32_t authhandle1 = 0;
+	uint32_t authhandle2 = 0;
+	unsigned char cont = 0;
+	uint32_t ordinal;
+	uint32_t keyhndl;
+	int ret;
+
+	/* sessions for unsealing key and data */
+	ret = oiap(tb, &authhandle1, enonce1);
+	if (ret < 0) {
+		pr_info("trusted_key: oiap failed (%d)\n", ret);
+		return ret;
+	}
+	ret = oiap(tb, &authhandle2, enonce2);
+	if (ret < 0) {
+		pr_info("trusted_key: oiap failed (%d)\n", ret);
+		return ret;
+	}
+
+	ordinal = htonl(TPM_ORD_UNSEAL);
+	keyhndl = htonl(SRKHANDLE);
+	ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE);
+	if (ret != TPM_NONCE_SIZE) {
+		pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
+		return ret;
+	}
+	ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
+			   enonce1, nonceodd, cont, sizeof(uint32_t),
+			   &ordinal, bloblen, blob, 0, 0);
+	if (ret < 0)
+		return ret;
+	ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE,
+			   enonce2, nonceodd, cont, sizeof(uint32_t),
+			   &ordinal, bloblen, blob, 0, 0);
+	if (ret < 0)
+		return ret;
+
+	/* build and send TPM request packet */
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_AUTH2_COMMAND);
+	store32(tb, TPM_UNSEAL_SIZE + bloblen);
+	store32(tb, TPM_ORD_UNSEAL);
+	store32(tb, keyhandle);
+	storebytes(tb, blob, bloblen);
+	store32(tb, authhandle1);
+	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+	store8(tb, cont);
+	storebytes(tb, authdata1, SHA1_DIGEST_SIZE);
+	store32(tb, authhandle2);
+	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+	store8(tb, cont);
+	storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
+
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+	if (ret < 0) {
+		pr_info("trusted_key: authhmac failed (%d)\n", ret);
+		return ret;
+	}
+
+	*datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+	ret = TSS_checkhmac2(tb->data, ordinal, nonceodd,
+			     keyauth, SHA1_DIGEST_SIZE,
+			     blobauth, SHA1_DIGEST_SIZE,
+			     sizeof(uint32_t), TPM_DATA_OFFSET,
+			     *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0,
+			     0);
+	if (ret < 0) {
+		pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret);
+		return ret;
+	}
+	memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
+	return 0;
+}
+
+/*
+ * Have the TPM seal (encrypt) the symmetric key
+ */
+static int key_seal(struct trusted_key_payload *p,
+		    struct trusted_key_options *o)
+{
+	struct tpm_buf *tb;
+	int ret;
+
+	tb = kzalloc(sizeof *tb, GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+
+	/* include migratable flag at end of sealed key */
+	p->key[p->key_len] = p->migratable;
+
+	ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth,
+		       p->key, p->key_len + 1, p->blob, &p->blob_len,
+		       o->blobauth, o->pcrinfo, o->pcrinfo_len);
+	if (ret < 0)
+		pr_info("trusted_key: srkseal failed (%d)\n", ret);
+
+	kzfree(tb);
+	return ret;
+}
+
+/*
+ * Have the TPM unseal (decrypt) the symmetric key
+ */
+static int key_unseal(struct trusted_key_payload *p,
+		      struct trusted_key_options *o)
+{
+	struct tpm_buf *tb;
+	int ret;
+
+	tb = kzalloc(sizeof *tb, GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+
+	ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
+			 o->blobauth, p->key, &p->key_len);
+	if (ret < 0)
+		pr_info("trusted_key: srkunseal failed (%d)\n", ret);
+	else
+		/* pull migratable flag out of sealed key */
+		p->migratable = p->key[--p->key_len];
+
+	kzfree(tb);
+	return ret;
+}
+
+enum {
+	Opt_err = -1,
+	Opt_new, Opt_load, Opt_update,
+	Opt_keyhandle, Opt_keyauth, Opt_blobauth,
+	Opt_pcrinfo, Opt_pcrlock, Opt_migratable
+};
+
+static const match_table_t key_tokens = {
+	{Opt_new, "new"},
+	{Opt_load, "load"},
+	{Opt_update, "update"},
+	{Opt_keyhandle, "keyhandle=%s"},
+	{Opt_keyauth, "keyauth=%s"},
+	{Opt_blobauth, "blobauth=%s"},
+	{Opt_pcrinfo, "pcrinfo=%s"},
+	{Opt_pcrlock, "pcrlock=%s"},
+	{Opt_migratable, "migratable=%s"},
+	{Opt_err, NULL}
+};
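+
+/*
+ * Illustrative keyctl usage (see keys-trusted-encrypted.txt):
+ *   keyctl add trusted kmk "new 32" @u
+ *   keyctl add trusted kmk "load `cat kmk.blob` pcrlock=5" @u
+ */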
+
+/* can have zero or more token= options */
+static int getoptions(char *c, struct trusted_key_payload *pay,
+		      struct trusted_key_options *opt)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *p = c;
+	int token;
+	int res;
+	unsigned long handle;
+	unsigned long lock;
+
+	while ((p = strsep(&c, " \t"))) {
+		if (*p == '\0' || *p == ' ' || *p == '\t')
+			continue;
+		token = match_token(p, key_tokens, args);
+
+		switch (token) {
+		case Opt_pcrinfo:
+			opt->pcrinfo_len = strlen(args[0].from) / 2;
+			if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
+				return -EINVAL;
+			res = hex2bin(opt->pcrinfo, args[0].from,
+				      opt->pcrinfo_len);
+			if (res < 0)
+				return -EINVAL;
+			break;
+		case Opt_keyhandle:
+			res = kstrtoul(args[0].from, 16, &handle);
+			if (res < 0)
+				return -EINVAL;
+			opt->keytype = SEAL_keytype;
+			opt->keyhandle = handle;
+			break;
+		case Opt_keyauth:
+			if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+				return -EINVAL;
+			res = hex2bin(opt->keyauth, args[0].from,
+				      SHA1_DIGEST_SIZE);
+			if (res < 0)
+				return -EINVAL;
+			break;
+		case Opt_blobauth:
+			if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+				return -EINVAL;
+			res = hex2bin(opt->blobauth, args[0].from,
+				      SHA1_DIGEST_SIZE);
+			if (res < 0)
+				return -EINVAL;
+			break;
+		case Opt_migratable:
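+			/* only "migratable=0" is accepted; keys default to migratable */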
+			if (*args[0].from == '0')
+				pay->migratable = 0;
+			else
+				return -EINVAL;
+			break;
+		case Opt_pcrlock:
+			res = kstrtoul(args[0].from, 10, &lock);
+			if (res < 0)
+				return -EINVAL;
+			opt->pcrlock = lock;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * datablob_parse - parse the keyctl data and fill in the
+ * 		    payload and options structures
+ *
+ * On success returns the parsed command (Opt_new, Opt_load or Opt_update);
+ * otherwise -EINVAL.
+ */
+static int datablob_parse(char *datablob, struct trusted_key_payload *p,
+			  struct trusted_key_options *o)
+{
+	substring_t args[MAX_OPT_ARGS];
+	long keylen;
+	int ret = -EINVAL;
+	int key_cmd;
+	char *c;
+
+	/* main command */
+	c = strsep(&datablob, " \t");
+	if (!c)
+		return -EINVAL;
+	key_cmd = match_token(c, key_tokens, args);
+	switch (key_cmd) {
+	case Opt_new:
+		/* first argument is key size */
+		c = strsep(&datablob, " \t");
+		if (!c)
+			return -EINVAL;
+		ret = kstrtol(c, 10, &keylen);
+		if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
+			return -EINVAL;
+		p->key_len = keylen;
+		ret = getoptions(datablob, p, o);
+		if (ret < 0)
+			return ret;
+		ret = Opt_new;
+		break;
+	case Opt_load:
+		/* first argument is sealed blob */
+		c = strsep(&datablob, " \t");
+		if (!c)
+			return -EINVAL;
+		p->blob_len = strlen(c) / 2;
+		if (p->blob_len > MAX_BLOB_SIZE)
+			return -EINVAL;
+		ret = hex2bin(p->blob, c, p->blob_len);
+		if (ret < 0)
+			return -EINVAL;
+		ret = getoptions(datablob, p, o);
+		if (ret < 0)
+			return ret;
+		ret = Opt_load;
+		break;
+	case Opt_update:
+		/* all arguments are options */
+		ret = getoptions(datablob, p, o);
+		if (ret < 0)
+			return ret;
+		ret = Opt_update;
+		break;
+	case Opt_err:
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static struct trusted_key_options *trusted_options_alloc(void)
+{
+	struct trusted_key_options *options;
+	int tpm2;
+
+	tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+	if (tpm2 < 0)
+		return NULL;
+
+	options = kzalloc(sizeof *options, GFP_KERNEL);
+	if (options) {
+		/* set any non-zero defaults */
+		options->keytype = SRK_keytype;
+
+		if (!tpm2)
+			options->keyhandle = SRKHANDLE;
+	}
+	return options;
+}
+
+static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+{
+	struct trusted_key_payload *p = NULL;
+	int ret;
+
+	ret = key_payload_reserve(key, sizeof *p);
+	if (ret < 0)
+		return p;
+	p = kzalloc(sizeof *p, GFP_KERNEL);
+	if (p)
+		p->migratable = 1; /* migratable by default */
+	return p;
+}
+
+/*
+ * trusted_instantiate - create a new trusted key
+ *
+ * Unseal an existing trusted blob or, for a new key, get a
+ * random key, then seal and create a trusted key-type key,
+ * adding it to the specified keyring.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int trusted_instantiate(struct key *key,
+			       struct key_preparsed_payload *prep)
+{
+	struct trusted_key_payload *payload = NULL;
+	struct trusted_key_options *options = NULL;
+	size_t datalen = prep->datalen;
+	char *datablob;
+	int ret = 0;
+	int key_cmd;
+	size_t key_len;
+	int tpm2;
+
+	tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+	if (tpm2 < 0)
+		return tpm2;
+
+	if (datalen <= 0 || datalen > 32767 || !prep->data)
+		return -EINVAL;
+
+	datablob = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!datablob)
+		return -ENOMEM;
+	memcpy(datablob, prep->data, datalen);
+	datablob[datalen] = '\0';
+
+	options = trusted_options_alloc();
+	if (!options) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	payload = trusted_payload_alloc(key);
+	if (!payload) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	key_cmd = datablob_parse(datablob, payload, options);
+	if (key_cmd < 0) {
+		ret = key_cmd;
+		goto out;
+	}
+
+	if (!options->keyhandle) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dump_payload(payload);
+	dump_options(options);
+
+	switch (key_cmd) {
+	case Opt_load:
+		if (tpm2)
+			ret = tpm_unseal_trusted(TPM_ANY_NUM, payload, options);
+		else
+			ret = key_unseal(payload, options);
+		dump_payload(payload);
+		dump_options(options);
+		if (ret < 0)
+			pr_info("trusted_key: key_unseal failed (%d)\n", ret);
+		break;
+	case Opt_new:
+		key_len = payload->key_len;
+		ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len);
+		if (ret != key_len) {
+			pr_info("trusted_key: key_create failed (%d)\n", ret);
+			goto out;
+		}
+		if (tpm2)
+			ret = tpm_seal_trusted(TPM_ANY_NUM, payload, options);
+		else
+			ret = key_seal(payload, options);
+		if (ret < 0)
+			pr_info("trusted_key: key_seal failed (%d)\n", ret);
+		break;
+	default:
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!ret && options->pcrlock)
+		ret = pcrlock(options->pcrlock);
+out:
+	kzfree(datablob);
+	kzfree(options);
+	if (!ret)
+		rcu_assign_keypointer(key, payload);
+	else
+		kzfree(payload);
+	return ret;
+}
+
+static void trusted_rcu_free(struct rcu_head *rcu)
+{
+	struct trusted_key_payload *p;
+
+	p = container_of(rcu, struct trusted_key_payload, rcu);
+	kzfree(p);
+}
+
+/*
+ * trusted_update - reseal an existing key with new PCR values
+ */
+static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+	struct trusted_key_payload *p;
+	struct trusted_key_payload *new_p;
+	struct trusted_key_options *new_o;
+	size_t datalen = prep->datalen;
+	char *datablob;
+	int ret = 0;
+
+	if (key_is_negative(key))
+		return -ENOKEY;
+	p = key->payload.data[0];
+	if (!p->migratable)
+		return -EPERM;
+	if (datalen <= 0 || datalen > 32767 || !prep->data)
+		return -EINVAL;
+
+	datablob = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!datablob)
+		return -ENOMEM;
+	new_o = trusted_options_alloc();
+	if (!new_o) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	new_p = trusted_payload_alloc(key);
+	if (!new_p) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(datablob, prep->data, datalen);
+	datablob[datalen] = '\0';
+	ret = datablob_parse(datablob, new_p, new_o);
+	if (ret != Opt_update) {
+		ret = -EINVAL;
+		kzfree(new_p);
+		goto out;
+	}
+
+	if (!new_o->keyhandle) {
+		ret = -EINVAL;
+		kzfree(new_p);
+		goto out;
+	}
+
+	/* copy old key values, and reseal with new pcrs */
+	new_p->migratable = p->migratable;
+	new_p->key_len = p->key_len;
+	memcpy(new_p->key, p->key, p->key_len);
+	dump_payload(p);
+	dump_payload(new_p);
+
+	ret = key_seal(new_p, new_o);
+	if (ret < 0) {
+		pr_info("trusted_key: key_seal failed (%d)\n", ret);
+		kzfree(new_p);
+		goto out;
+	}
+	if (new_o->pcrlock) {
+		ret = pcrlock(new_o->pcrlock);
+		if (ret < 0) {
+			pr_info("trusted_key: pcrlock failed (%d)\n", ret);
+			kzfree(new_p);
+			goto out;
+		}
+	}
+	rcu_assign_keypointer(key, new_p);
+	call_rcu(&p->rcu, trusted_rcu_free);
+out:
+	kzfree(datablob);
+	kzfree(new_o);
+	return ret;
+}
+
+/*
+ * trusted_read - copy the sealed blob data to userspace in hex.
+ * On success, return to userspace the trusted key datablob size.
+ */
+static long trusted_read(const struct key *key, char __user *buffer,
+			 size_t buflen)
+{
+	struct trusted_key_payload *p;
+	char *ascii_buf;
+	char *bufp;
+	int i;
+
+	p = rcu_dereference_key(key);
+	if (!p)
+		return -EINVAL;
+
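+	/* with no buffer, or one that is too small, just report the size needed */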
+	if (buffer && buflen >= 2 * p->blob_len) {
+		ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+		if (!ascii_buf)
+			return -ENOMEM;
+
+		bufp = ascii_buf;
+		for (i = 0; i < p->blob_len; i++)
+			bufp = hex_byte_pack(bufp, p->blob[i]);
+		if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
+			kzfree(ascii_buf);
+			return -EFAULT;
+		}
+		kzfree(ascii_buf);
+	}
+	return 2 * p->blob_len;
+}
+
+/*
+ * trusted_destroy - clear and free the key's payload
+ */
+static void trusted_destroy(struct key *key)
+{
+	kzfree(key->payload.data[0]);
+}
+
+struct key_type key_type_trusted = {
+	.name = "trusted",
+	.instantiate = trusted_instantiate,
+	.update = trusted_update,
+	.destroy = trusted_destroy,
+	.describe = user_describe,
+	.read = trusted_read,
+};
+
+EXPORT_SYMBOL_GPL(key_type_trusted);
+
+static void trusted_shash_release(void)
+{
+	if (hashalg)
+		crypto_free_shash(hashalg);
+	if (hmacalg)
+		crypto_free_shash(hmacalg);
+}
+
+static int __init trusted_shash_alloc(void)
+{
+	int ret;
+
+	hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hmacalg)) {
+		pr_info("trusted_key: could not allocate crypto %s\n",
+			hmac_alg);
+		return PTR_ERR(hmacalg);
+	}
+
+	hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hashalg)) {
+		pr_info("trusted_key: could not allocate crypto %s\n",
+			hash_alg);
+		ret = PTR_ERR(hashalg);
+		goto hashalg_fail;
+	}
+
+	return 0;
+
+hashalg_fail:
+	crypto_free_shash(hmacalg);
+	return ret;
+}
+
+static int __init init_trusted(void)
+{
+	int ret;
+
+	ret = trusted_shash_alloc();
+	if (ret < 0)
+		return ret;
+	ret = register_key_type(&key_type_trusted);
+	if (ret < 0)
+		trusted_shash_release();
+	return ret;
+}
+
+static void __exit cleanup_trusted(void)
+{
+	trusted_shash_release();
+	unregister_key_type(&key_type_trusted);
+}
+
+late_initcall(init_trusted);
+module_exit(cleanup_trusted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/trusted.h b/security/keys/trusted.h
new file mode 100644
index 0000000..ff001a5
--- /dev/null
+++ b/security/keys/trusted.h
@@ -0,0 +1,123 @@
+#ifndef __TRUSTED_KEY_H
+#define __TRUSTED_KEY_H
+
+/* implementation specific TPM constants */
+#define MAX_BUF_SIZE			512
+#define TPM_GETRANDOM_SIZE		14
+#define TPM_OSAP_SIZE			36
+#define TPM_OIAP_SIZE			10
+#define TPM_SEAL_SIZE			87
+#define TPM_UNSEAL_SIZE			104
+#define TPM_SIZE_OFFSET			2
+#define TPM_RETURN_OFFSET		6
+#define TPM_DATA_OFFSET			10
+
+#define LOAD32(buffer, offset)	(ntohl(*(uint32_t *)&buffer[offset]))
+#define LOAD32N(buffer, offset)	(*(uint32_t *)&buffer[offset])
+#define LOAD16(buffer, offset)	(ntohs(*(uint16_t *)&buffer[offset]))
+
+struct tpm_buf {
+	int len;
+	unsigned char data[MAX_BUF_SIZE];
+};
+
+#define INIT_BUF(tb) (tb->len = 0)
+
+struct osapsess {
+	uint32_t handle;
+	unsigned char secret[SHA1_DIGEST_SIZE];
+	unsigned char enonce[TPM_NONCE_SIZE];
+};
+
+/* discrete values, but have to store in uint16_t for TPM use */
+enum {
+	SEAL_keytype = 1,
+	SRK_keytype = 4
+};
+
+#define TPM_DEBUG 0
+
+#if TPM_DEBUG
+static inline void dump_options(struct trusted_key_options *o)
+{
+	pr_info("trusted_key: sealing key type %d\n", o->keytype);
+	pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
+	pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
+	pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
+	print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
+		       16, 1, o->pcrinfo, o->pcrinfo_len, 0);
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+	pr_info("trusted_key: key_len %d\n", p->key_len);
+	print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
+		       16, 1, p->key, p->key_len, 0);
+	pr_info("trusted_key: bloblen %d\n", p->blob_len);
+	print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
+		       16, 1, p->blob, p->blob_len, 0);
+	pr_info("trusted_key: migratable %d\n", p->migratable);
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+	print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
+		       16, 1, &s->handle, 4, 0);
+	pr_info("trusted-key: secret:\n");
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+		       16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
+	pr_info("trusted-key: enonce:\n");
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+		       16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0);
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+	int len;
+
+	pr_info("\ntrusted-key: tpm buffer\n");
+	len = LOAD32(buf, TPM_SIZE_OFFSET);
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
+}
+#else
+static inline void dump_options(struct trusted_key_options *o)
+{
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+}
+#endif
+
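+/* these writers do no bounds checking; callers must stay within MAX_BUF_SIZE */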
+static inline void store8(struct tpm_buf *buf, const unsigned char value)
+{
+	buf->data[buf->len++] = value;
+}
+
+static inline void store16(struct tpm_buf *buf, const uint16_t value)
+{
+	*(uint16_t *)&buf->data[buf->len] = htons(value);
+	buf->len += sizeof value;
+}
+
+static inline void store32(struct tpm_buf *buf, const uint32_t value)
+{
+	*(uint32_t *)&buf->data[buf->len] = htonl(value);
+	buf->len += sizeof value;
+}
+
+static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
+			      const int len)
+{
+	memcpy(buf->data + buf->len, in, len);
+	buf->len += len;
+}
+#endif
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
new file mode 100644
index 0000000..eba8a51
--- /dev/null
+++ b/security/keys/user_defined.c
@@ -0,0 +1,224 @@
+/* user_defined.c: user defined key type
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+static int logon_vet_description(const char *desc);
+
+/*
+ * user defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_user = {
+	.name			= "user",
+	.preparse		= user_preparse,
+	.free_preparse		= user_free_preparse,
+	.instantiate		= generic_key_instantiate,
+	.update			= user_update,
+	.revoke			= user_revoke,
+	.destroy		= user_destroy,
+	.describe		= user_describe,
+	.read			= user_read,
+};
+
+EXPORT_SYMBOL_GPL(key_type_user);
+
+/*
+ * This key type is essentially the same as key_type_user, but it does
+ * not define a .read op. This is suitable for storing username and
+ * password pairs in the keyring that you do not want to be readable
+ * from userspace.
+ */
+struct key_type key_type_logon = {
+	.name			= "logon",
+	.preparse		= user_preparse,
+	.free_preparse		= user_free_preparse,
+	.instantiate		= generic_key_instantiate,
+	.update			= user_update,
+	.revoke			= user_revoke,
+	.destroy		= user_destroy,
+	.describe		= user_describe,
+	.vet_description	= logon_vet_description,
+};
+EXPORT_SYMBOL_GPL(key_type_logon);
+
+/*
+ * Preparse a user defined key payload
+ */
+int user_preparse(struct key_preparsed_payload *prep)
+{
+	struct user_key_payload *upayload;
+	size_t datalen = prep->datalen;
+
+	if (datalen <= 0 || datalen > 32767 || !prep->data)
+		return -EINVAL;
+
+	upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
+	if (!upayload)
+		return -ENOMEM;
+
+	/* attach the data */
+	prep->quotalen = datalen;
+	prep->payload.data[0] = upayload;
+	upayload->datalen = datalen;
+	memcpy(upayload->data, prep->data, datalen);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(user_preparse);
+
+/*
+ * Free a preparse of a user defined key payload
+ */
+void user_free_preparse(struct key_preparsed_payload *prep)
+{
+	kfree(prep->payload.data[0]);
+}
+EXPORT_SYMBOL_GPL(user_free_preparse);
+
+/*
+ * update a user defined key
+ * - the key's semaphore is write-locked
+ */
+int user_update(struct key *key, struct key_preparsed_payload *prep)
+{
+	struct user_key_payload *upayload, *zap;
+	size_t datalen = prep->datalen;
+	int ret;
+
+	ret = -EINVAL;
+	if (datalen <= 0 || datalen > 32767 || !prep->data)
+		goto error;
+
+	/* construct a replacement payload */
+	ret = -ENOMEM;
+	upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
+	if (!upayload)
+		goto error;
+
+	upayload->datalen = datalen;
+	memcpy(upayload->data, prep->data, datalen);
+
+	/* check the quota and attach the new data */
+	zap = upayload;
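+	/* if the quota reservation fails, zap still points at the new payload,
+	 * which is then freed below */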
+
+	ret = key_payload_reserve(key, datalen);
+
+	if (ret == 0) {
+		/* attach the new data, displacing the old */
+		if (key_is_positive(key))
+			zap = key->payload.data[0];
+		else
+			zap = NULL;
+		rcu_assign_keypointer(key, upayload);
+		key->expiry = 0;
+	}
+
+	if (zap)
+		kfree_rcu(zap, rcu);
+
+error:
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(user_update);
+
+/*
+ * dispose of the payload and quota of a revoked user defined key
+ * - called with the key sem write-locked
+ */
+void user_revoke(struct key *key)
+{
+	struct user_key_payload *upayload = key->payload.data[0];
+
+	/* clear the quota */
+	key_payload_reserve(key, 0);
+
+	if (upayload) {
+		rcu_assign_keypointer(key, NULL);
+		kfree_rcu(upayload, rcu);
+	}
+}
+
+EXPORT_SYMBOL(user_revoke);
+
+/*
+ * dispose of the data dangling from the corpse of a user key
+ */
+void user_destroy(struct key *key)
+{
+	struct user_key_payload *upayload = key->payload.data[0];
+
+	kfree(upayload);
+}
+
+EXPORT_SYMBOL_GPL(user_destroy);
+
+/*
+ * describe the user key
+ */
+void user_describe(const struct key *key, struct seq_file *m)
+{
+	seq_puts(m, key->description);
+	if (key_is_positive(key))
+		seq_printf(m, ": %u", key->datalen);
+}
+
+EXPORT_SYMBOL_GPL(user_describe);
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long user_read(const struct key *key, char __user *buffer, size_t buflen)
+{
+	const struct user_key_payload *upayload;
+	long ret;
+
+	upayload = user_key_payload(key);
+	ret = upayload->datalen;
+
+	/* we can return the data as is */
+	if (buffer && buflen > 0) {
+		if (buflen > upayload->datalen)
+			buflen = upayload->datalen;
+
+		if (copy_to_user(buffer, upayload->data, buflen) != 0)
+			ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(user_read);
+
+/* Vet the description for a "logon" key */
+static int logon_vet_description(const char *desc)
+{
+	char *p;
+
+	/* require a "qualified" description string */
+	p = strchr(desc, ':');
+	if (!p)
+		return -EINVAL;
+
+	/* also reject description with ':' as first char */
+	if (p == desc)
+		return -EINVAL;
+
+	return 0;
+}
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
new file mode 100644
index 0000000..cccbf30
--- /dev/null
+++ b/security/lsm_audit.c
@@ -0,0 +1,436 @@
+/*
+ * common LSM auditing functions
+ *
+ * Based on code written for SELinux by:
+ *			Stephen Smalley <sds@epoch.ncsc.mil>
+ *			James Morris <jmorris@redhat.com>
+ * Author: Etienne Basset <etienne.basset@ensta.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <net/sock.h>
+#include <linux/un.h>
+#include <net/af_unix.h>
+#include <linux/audit.h>
+#include <linux/ipv6.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/dccp.h>
+#include <linux/sctp.h>
+#include <linux/lsm_audit.h>
+
+/**
+ * ipv4_skb_to_auditdata : fill auditdata from skb
+ * @skb : the skb
+ * @ad : the audit data to fill
+ * @proto : the layer 4 protocol
+ *
+ * return  0 on success
+ */
+int ipv4_skb_to_auditdata(struct sk_buff *skb,
+		struct common_audit_data *ad, u8 *proto)
+{
+	int ret = 0;
+	struct iphdr *ih;
+
+	ih = ip_hdr(skb);
+	if (ih == NULL)
+		return -EINVAL;
+
+	ad->u.net->v4info.saddr = ih->saddr;
+	ad->u.net->v4info.daddr = ih->daddr;
+
+	if (proto)
+		*proto = ih->protocol;
+	/* non initial fragment */
+	if (ntohs(ih->frag_off) & IP_OFFSET)
+		return 0;
+
+	switch (ih->protocol) {
+	case IPPROTO_TCP: {
+		struct tcphdr *th = tcp_hdr(skb);
+		if (th == NULL)
+			break;
+
+		ad->u.net->sport = th->source;
+		ad->u.net->dport = th->dest;
+		break;
+	}
+	case IPPROTO_UDP: {
+		struct udphdr *uh = udp_hdr(skb);
+		if (uh == NULL)
+			break;
+
+		ad->u.net->sport = uh->source;
+		ad->u.net->dport = uh->dest;
+		break;
+	}
+	case IPPROTO_DCCP: {
+		struct dccp_hdr *dh = dccp_hdr(skb);
+		if (dh == NULL)
+			break;
+
+		ad->u.net->sport = dh->dccph_sport;
+		ad->u.net->dport = dh->dccph_dport;
+		break;
+	}
+	case IPPROTO_SCTP: {
+		struct sctphdr *sh = sctp_hdr(skb);
+		if (sh == NULL)
+			break;
+		ad->u.net->sport = sh->source;
+		ad->u.net->dport = sh->dest;
+		break;
+	}
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+/**
+ * ipv6_skb_to_auditdata : fill auditdata from skb
+ * @skb : the skb
+ * @ad : the audit data to fill
+ * @proto : the layer 4 protocol
+ *
+ * return  0 on success
+ */
+int ipv6_skb_to_auditdata(struct sk_buff *skb,
+		struct common_audit_data *ad, u8 *proto)
+{
+	int offset, ret = 0;
+	struct ipv6hdr *ip6;
+	u8 nexthdr;
+	__be16 frag_off;
+
+	ip6 = ipv6_hdr(skb);
+	if (ip6 == NULL)
+		return -EINVAL;
+	ad->u.net->v6info.saddr = ip6->saddr;
+	ad->u.net->v6info.daddr = ip6->daddr;
+	ret = 0;
+	/* IPv6 can have several extension headers before the transport
+	 * header; skip over them */
+	offset = skb_network_offset(skb);
+	offset += sizeof(*ip6);
+	nexthdr = ip6->nexthdr;
+	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
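+	/* a negative offset means the transport header could not be located */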
+	if (offset < 0)
+		return 0;
+	if (proto)
+		*proto = nexthdr;
+	switch (nexthdr) {
+	case IPPROTO_TCP: {
+		struct tcphdr _tcph, *th;
+
+		th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+		if (th == NULL)
+			break;
+
+		ad->u.net->sport = th->source;
+		ad->u.net->dport = th->dest;
+		break;
+	}
+	case IPPROTO_UDP: {
+		struct udphdr _udph, *uh;
+
+		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+		if (uh == NULL)
+			break;
+
+		ad->u.net->sport = uh->source;
+		ad->u.net->dport = uh->dest;
+		break;
+	}
+	case IPPROTO_DCCP: {
+		struct dccp_hdr _dccph, *dh;
+
+		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+		if (dh == NULL)
+			break;
+
+		ad->u.net->sport = dh->dccph_sport;
+		ad->u.net->dport = dh->dccph_dport;
+		break;
+	}
+	case IPPROTO_SCTP: {
+		struct sctphdr _sctph, *sh;
+
+		sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
+		if (sh == NULL)
+			break;
+		ad->u.net->sport = sh->source;
+		ad->u.net->dport = sh->dest;
+		break;
+	}
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+#endif
+
+
+static inline void print_ipv6_addr(struct audit_buffer *ab,
+				   struct in6_addr *addr, __be16 port,
+				   char *name1, char *name2)
+{
+	if (!ipv6_addr_any(addr))
+		audit_log_format(ab, " %s=%pI6c", name1, addr);
+	if (port)
+		audit_log_format(ab, " %s=%d", name2, ntohs(port));
+}
+
+static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
+				   __be16 port, char *name1, char *name2)
+{
+	if (addr)
+		audit_log_format(ab, " %s=%pI4", name1, &addr);
+	if (port)
+		audit_log_format(ab, " %s=%d", name2, ntohs(port));
+}
+
+/**
+ * dump_common_audit_data - helper to dump common audit data
+ * @a : common audit data
+ *
+ */
+static void dump_common_audit_data(struct audit_buffer *ab,
+				   struct common_audit_data *a)
+{
+	char comm[sizeof(current->comm)];
+
+	/*
+	 * To keep stack sizes in check, force programmers to notice if they
+	 * start making this union too large!  See struct lsm_network_audit
+	 * as an example of how to deal with large data.
+	 */
+	BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
+
+	audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
+	audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));
+
+	switch (a->type) {
+	case LSM_AUDIT_DATA_NONE:
+		return;
+	case LSM_AUDIT_DATA_IPC:
+		audit_log_format(ab, " key=%d ", a->u.ipc_id);
+		break;
+	case LSM_AUDIT_DATA_CAP:
+		audit_log_format(ab, " capability=%d ", a->u.cap);
+		break;
+	case LSM_AUDIT_DATA_PATH: {
+		struct inode *inode;
+
+		audit_log_d_path(ab, " path=", &a->u.path);
+
+		inode = d_backing_inode(a->u.path.dentry);
+		if (inode) {
+			audit_log_format(ab, " dev=");
+			audit_log_untrustedstring(ab, inode->i_sb->s_id);
+			audit_log_format(ab, " ino=%lu", inode->i_ino);
+		}
+		break;
+	}
+	case LSM_AUDIT_DATA_IOCTL_OP: {
+		struct inode *inode;
+
+		audit_log_d_path(ab, " path=", &a->u.op->path);
+
+		inode = a->u.op->path.dentry->d_inode;
+		if (inode) {
+			audit_log_format(ab, " dev=");
+			audit_log_untrustedstring(ab, inode->i_sb->s_id);
+			audit_log_format(ab, " ino=%lu", inode->i_ino);
+		}
+
+		audit_log_format(ab, " ioctlcmd=%hx", a->u.op->cmd);
+		break;
+	}
+	case LSM_AUDIT_DATA_DENTRY: {
+		struct inode *inode;
+
+		audit_log_format(ab, " name=");
+		audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+
+		inode = d_backing_inode(a->u.dentry);
+		if (inode) {
+			audit_log_format(ab, " dev=");
+			audit_log_untrustedstring(ab, inode->i_sb->s_id);
+			audit_log_format(ab, " ino=%lu", inode->i_ino);
+		}
+		break;
+	}
+	case LSM_AUDIT_DATA_INODE: {
+		struct dentry *dentry;
+		struct inode *inode;
+
+		inode = a->u.inode;
+		dentry = d_find_alias(inode);
+		if (dentry) {
+			audit_log_format(ab, " name=");
+			audit_log_untrustedstring(ab,
+					 dentry->d_name.name);
+			dput(dentry);
+		}
+		audit_log_format(ab, " dev=");
+		audit_log_untrustedstring(ab, inode->i_sb->s_id);
+		audit_log_format(ab, " ino=%lu", inode->i_ino);
+		break;
+	}
+	case LSM_AUDIT_DATA_TASK: {
+		struct task_struct *tsk = a->u.tsk;
+		if (tsk) {
+			pid_t pid = task_pid_nr(tsk);
+			if (pid) {
+				char comm[sizeof(tsk->comm)];
+				audit_log_format(ab, " opid=%d ocomm=", pid);
+				audit_log_untrustedstring(ab,
+				    memcpy(comm, tsk->comm, sizeof(comm)));
+			}
+		}
+		break;
+	}
+	case LSM_AUDIT_DATA_NET:
+		if (a->u.net->sk) {
+			struct sock *sk = a->u.net->sk;
+			struct unix_sock *u;
+			int len = 0;
+			char *p = NULL;
+
+			switch (sk->sk_family) {
+			case AF_INET: {
+				struct inet_sock *inet = inet_sk(sk);
+
+				print_ipv4_addr(ab, inet->inet_rcv_saddr,
+						inet->inet_sport,
+						"laddr", "lport");
+				print_ipv4_addr(ab, inet->inet_daddr,
+						inet->inet_dport,
+						"faddr", "fport");
+				break;
+			}
+#if IS_ENABLED(CONFIG_IPV6)
+			case AF_INET6: {
+				struct inet_sock *inet = inet_sk(sk);
+
+				print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
+						inet->inet_sport,
+						"laddr", "lport");
+				print_ipv6_addr(ab, &sk->sk_v6_daddr,
+						inet->inet_dport,
+						"faddr", "fport");
+				break;
+			}
+#endif
+			case AF_UNIX:
+				u = unix_sk(sk);
+				if (u->path.dentry) {
+					audit_log_d_path(ab, " path=", &u->path);
+					break;
+				}
+				if (!u->addr)
+					break;
+				len = u->addr->len - sizeof(short);
+				p = &u->addr->name->sun_path[0];
+				audit_log_format(ab, " path=");
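+				/* an abstract socket name begins with a NUL
+				 * byte; dump it as hex */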
+				if (*p)
+					audit_log_untrustedstring(ab, p);
+				else
+					audit_log_n_hex(ab, p, len);
+				break;
+			}
+		}
+
+		switch (a->u.net->family) {
+		case AF_INET:
+			print_ipv4_addr(ab, a->u.net->v4info.saddr,
+					a->u.net->sport,
+					"saddr", "src");
+			print_ipv4_addr(ab, a->u.net->v4info.daddr,
+					a->u.net->dport,
+					"daddr", "dest");
+			break;
+		case AF_INET6:
+			print_ipv6_addr(ab, &a->u.net->v6info.saddr,
+					a->u.net->sport,
+					"saddr", "src");
+			print_ipv6_addr(ab, &a->u.net->v6info.daddr,
+					a->u.net->dport,
+					"daddr", "dest");
+			break;
+		}
+		if (a->u.net->netif > 0) {
+			struct net_device *dev;
+
+			/* NOTE: we always use init's namespace */
+			dev = dev_get_by_index(&init_net, a->u.net->netif);
+			if (dev) {
+				audit_log_format(ab, " netif=%s", dev->name);
+				dev_put(dev);
+			}
+		}
+		break;
+#ifdef CONFIG_KEYS
+	case LSM_AUDIT_DATA_KEY:
+		audit_log_format(ab, " key_serial=%u", a->u.key_struct.key);
+		if (a->u.key_struct.key_desc) {
+			audit_log_format(ab, " key_desc=");
+			audit_log_untrustedstring(ab, a->u.key_struct.key_desc);
+		}
+		break;
+#endif
+	case LSM_AUDIT_DATA_KMOD:
+		audit_log_format(ab, " kmod=");
+		audit_log_untrustedstring(ab, a->u.kmod_name);
+		break;
+	} /* switch (a->type) */
+}
+
+/**
+ * common_lsm_audit - generic LSM auditing function
+ * @a:  auxiliary audit data
+ * @pre_audit: lsm-specific pre-audit callback
+ * @post_audit: lsm-specific post-audit callback
+ *
+ * Set up the audit buffer for common security information and use the
+ * callbacks to print LSM-specific information.
+ */
+void common_lsm_audit(struct common_audit_data *a,
+	void (*pre_audit)(struct audit_buffer *, void *),
+	void (*post_audit)(struct audit_buffer *, void *))
+{
+	struct audit_buffer *ab;
+
+	if (a == NULL)
+		return;
+	/* we use GFP_ATOMIC so we won't sleep */
+	ab = audit_log_start(current->audit_context, GFP_ATOMIC | __GFP_NOWARN,
+			     AUDIT_AVC);
+
+	if (ab == NULL)
+		return;
+
+	if (pre_audit)
+		pre_audit(ab, a);
+
+	dump_common_audit_data(ab, a);
+
+	if (post_audit)
+		post_audit(ab, a);
+
+	audit_log_end(ab);
+}
diff --git a/security/min_addr.c b/security/min_addr.c
new file mode 100644
index 0000000..f728728
--- /dev/null
+++ b/security/min_addr.c
@@ -0,0 +1,52 @@
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/security.h>
+#include <linux/sysctl.h>
+
+/* amount of vm to protect from userspace access by both DAC and the LSM */
+unsigned long mmap_min_addr;
+/* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */
+unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
+/* amount of vm to protect from userspace using the LSM = CONFIG_LSM_MMAP_MIN_ADDR */
+
+/*
+ * Update mmap_min_addr = max(dac_mmap_min_addr, CONFIG_LSM_MMAP_MIN_ADDR)
+ */
+static void update_mmap_min_addr(void)
+{
+#ifdef CONFIG_LSM_MMAP_MIN_ADDR
+	if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
+		mmap_min_addr = dac_mmap_min_addr;
+	else
+		mmap_min_addr = CONFIG_LSM_MMAP_MIN_ADDR;
+#else
+	mmap_min_addr = dac_mmap_min_addr;
+#endif
+}
+
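+/*
+ * Worked example of the max() above (values are illustrative): with
+ * CONFIG_LSM_MMAP_MIN_ADDR=32768 and dac_mmap_min_addr raised to 65536
+ * via sysctl, mmap_min_addr becomes 65536; lowering dac_mmap_min_addr
+ * to 4096 drops mmap_min_addr back to the LSM floor of 32768.
+ */
+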
+/*
+ * sysctl handler that sets dac_mmap_min_addr to the new value and then
+ * calls update_mmap_min_addr() so non-MAP_FIXED hints get rounded properly.
+ */
+int mmap_min_addr_handler(struct ctl_table *table, int write,
+			  void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret;
+
+	if (write && !capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+
+	update_mmap_min_addr();
+
+	return ret;
+}
+
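+/*
+ * Illustrative usage (the value is an example, not a default): writing
+ * the tunable requires CAP_SYS_RAWIO, e.g.
+ *
+ *	# echo 65536 > /proc/sys/vm/mmap_min_addr
+ *
+ * which updates dac_mmap_min_addr and recomputes mmap_min_addr as above.
+ */
+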
+static int __init init_mmap_min_addr(void)
+{
+	update_mmap_min_addr();
+
+	return 0;
+}
+pure_initcall(init_mmap_min_addr);
diff --git a/security/security.c b/security/security.c
new file mode 100644
index 0000000..46f405c
--- /dev/null
+++ b/security/security.c
@@ -0,0 +1,1885 @@
+/*
+ * Security plug functions
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/dcache.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/lsm_hooks.h>
+#include <linux/integrity.h>
+#include <linux/ima.h>
+#include <linux/evm.h>
+#include <linux/fsnotify.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/personality.h>
+#include <linux/backing-dev.h>
+#include <net/flow.h>
+
+#define MAX_LSM_EVM_XATTR	2
+
+/* Maximum number of letters for an LSM name string */
+#define SECURITY_NAME_MAX	10
+
+/* Boot-time LSM user choice */
+static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
+	CONFIG_DEFAULT_SECURITY;
+
+static void __init do_security_initcalls(void)
+{
+	initcall_t *call;
+	call = __security_initcall_start;
+	while (call < __security_initcall_end) {
+		(*call) ();
+		call++;
+	}
+}
+
+/**
+ * security_init - initializes the security framework
+ *
+ * This should be called early in the kernel initialization sequence.
+ */
+int __init security_init(void)
+{
+	pr_info("Security Framework initialized\n");
+
+	/*
+	 * Load minor LSMs, with the capability module always first.
+	 */
+	capability_add_hooks();
+	yama_add_hooks();
+
+	/*
+	 * Load all the remaining security modules.
+	 */
+	do_security_initcalls();
+
+	return 0;
+}
+
+/* Save user chosen LSM */
+static int __init choose_lsm(char *str)
+{
+	strncpy(chosen_lsm, str, SECURITY_NAME_MAX);
+	return 1;
+}
+__setup("security=", choose_lsm);
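+
+/*
+ * Example (the module name is illustrative): booting with
+ * "security=selinux" on the kernel command line selects SELinux if it
+ * is built in; "security=" with an empty value selects no major LSM.
+ */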
+
+/**
+ * security_module_enable - Load given security module on boot?
+ * @module: the name of the module
+ *
+ * Each LSM must pass this check before registering its own operations
+ * to avoid security registration races. This method may also be used
+ * to check if your LSM is currently loaded during kernel initialization.
+ *
+ * Return true if:
+ *	- the passed LSM is the one chosen by the user at boot time, or
+ *	- the passed LSM is configured as the default and the user did not
+ *	  choose an alternate LSM at boot time.
+ * Otherwise, return false.
+ */
+int __init security_module_enable(const char *module)
+{
+	return !strcmp(module, chosen_lsm);
+}
+
+/*
+ * Hook list operation macros.
+ *
+ * call_void_hook:
+ *	Call each registered hook that does not return a value.
+ *
+ * call_int_hook:
+ *	Call each registered hook that returns a value, stopping at the
+ *	first non-zero return; IRC is returned if every hook returns 0
+ *	or none is registered.
+ */
+
+#define call_void_hook(FUNC, ...)				\
+	do {							\
+		struct security_hook_list *P;			\
+								\
+		list_for_each_entry(P, &security_hook_heads.FUNC, list)	\
+			P->hook.FUNC(__VA_ARGS__);		\
+	} while (0)
+
+#define call_int_hook(FUNC, IRC, ...) ({			\
+	int RC = IRC;						\
+	do {							\
+		struct security_hook_list *P;			\
+								\
+		list_for_each_entry(P, &security_hook_heads.FUNC, list) { \
+			RC = P->hook.FUNC(__VA_ARGS__);		\
+			if (RC != 0)				\
+				break;				\
+		}						\
+	} while (0);						\
+	RC;							\
+})
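+
+/*
+ * Illustrative sketch (not part of this file) of how an LSM populates
+ * these lists: a module declares a table of hooks with LSM_HOOK_INIT()
+ * and registers it with security_add_hooks(), both from
+ * <linux/lsm_hooks.h>, typically once security_module_enable() has
+ * confirmed it is the chosen LSM. The "example_*" names below are
+ * hypothetical:
+ *
+ *	static struct security_hook_list example_hooks[] = {
+ *		LSM_HOOK_INIT(file_open, example_file_open),
+ *		LSM_HOOK_INIT(inode_permission, example_inode_permission),
+ *	};
+ *
+ *	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks));
+ *
+ * call_int_hook() then walks the per-hook list in registration order
+ * and returns the first non-zero result, or IRC if no hook objects.
+ */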
+
+/* Security operations */
+
+int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+	return call_int_hook(binder_set_context_mgr, 0, mgr);
+}
+
+int security_binder_transaction(struct task_struct *from,
+				struct task_struct *to)
+{
+	return call_int_hook(binder_transaction, 0, from, to);
+}
+
+int security_binder_transfer_binder(struct task_struct *from,
+				    struct task_struct *to)
+{
+	return call_int_hook(binder_transfer_binder, 0, from, to);
+}
+
+int security_binder_transfer_file(struct task_struct *from,
+				  struct task_struct *to, struct file *file)
+{
+	return call_int_hook(binder_transfer_file, 0, from, to, file);
+}
+
+int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
+{
+	return call_int_hook(ptrace_access_check, 0, child, mode);
+}
+
+int security_ptrace_traceme(struct task_struct *parent)
+{
+	return call_int_hook(ptrace_traceme, 0, parent);
+}
+
+int security_capget(struct task_struct *target,
+		     kernel_cap_t *effective,
+		     kernel_cap_t *inheritable,
+		     kernel_cap_t *permitted)
+{
+	return call_int_hook(capget, 0, target,
+				effective, inheritable, permitted);
+}
+
+int security_capset(struct cred *new, const struct cred *old,
+		    const kernel_cap_t *effective,
+		    const kernel_cap_t *inheritable,
+		    const kernel_cap_t *permitted)
+{
+	return call_int_hook(capset, 0, new, old,
+				effective, inheritable, permitted);
+}
+
+int security_capable(const struct cred *cred, struct user_namespace *ns,
+		     int cap)
+{
+	return call_int_hook(capable, 0, cred, ns, cap, SECURITY_CAP_AUDIT);
+}
+
+int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
+			     int cap)
+{
+	return call_int_hook(capable, 0, cred, ns, cap, SECURITY_CAP_NOAUDIT);
+}
+
+int security_quotactl(int cmds, int type, int id, struct super_block *sb)
+{
+	return call_int_hook(quotactl, 0, cmds, type, id, sb);
+}
+
+int security_quota_on(struct dentry *dentry)
+{
+	return call_int_hook(quota_on, 0, dentry);
+}
+
+int security_syslog(int type)
+{
+	return call_int_hook(syslog, 0, type);
+}
+
+int security_settime(const struct timespec *ts, const struct timezone *tz)
+{
+	return call_int_hook(settime, 0, ts, tz);
+}
+
+int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
+{
+	struct security_hook_list *hp;
+	int cap_sys_admin = 1;
+	int rc;
+
+	/*
+	 * Each module will respond with a positive value if
+	 * it thinks the __vm_enough_memory() call should be
+	 * made with cap_sys_admin set. If all of the modules
+	 * agree that it should be set, it will be. If any module
+	 * thinks it should not be set, it won't be.
+	 */
+	list_for_each_entry(hp, &security_hook_heads.vm_enough_memory, list) {
+		rc = hp->hook.vm_enough_memory(mm, pages);
+		if (rc <= 0) {
+			cap_sys_admin = 0;
+			break;
+		}
+	}
+	return __vm_enough_memory(mm, pages, cap_sys_admin);
+}
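+
+/*
+ * For instance (illustrative): with two modules registered, one
+ * returning 1 and the other 0 for vm_enough_memory, the loop above
+ * clears cap_sys_admin, so __vm_enough_memory() is called without the
+ * extra CAP_SYS_ADMIN reserve.
+ */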
+
+int security_bprm_set_creds(struct linux_binprm *bprm)
+{
+	return call_int_hook(bprm_set_creds, 0, bprm);
+}
+
+int security_bprm_check(struct linux_binprm *bprm)
+{
+	int ret;
+
+	ret = call_int_hook(bprm_check_security, 0, bprm);
+	if (ret)
+		return ret;
+	return ima_bprm_check(bprm);
+}
+
+void security_bprm_committing_creds(struct linux_binprm *bprm)
+{
+	call_void_hook(bprm_committing_creds, bprm);
+}
+
+void security_bprm_committed_creds(struct linux_binprm *bprm)
+{
+	call_void_hook(bprm_committed_creds, bprm);
+}
+
+int security_bprm_secureexec(struct linux_binprm *bprm)
+{
+	return call_int_hook(bprm_secureexec, 0, bprm);
+}
+
+int security_sb_alloc(struct super_block *sb)
+{
+	return call_int_hook(sb_alloc_security, 0, sb);
+}
+
+void security_sb_free(struct super_block *sb)
+{
+	call_void_hook(sb_free_security, sb);
+}
+
+int security_sb_copy_data(char *orig, char *copy)
+{
+	return call_int_hook(sb_copy_data, 0, orig, copy);
+}
+EXPORT_SYMBOL(security_sb_copy_data);
+
+int security_sb_remount(struct super_block *sb, void *data)
+{
+	return call_int_hook(sb_remount, 0, sb, data);
+}
+
+int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
+{
+	return call_int_hook(sb_kern_mount, 0, sb, flags, data);
+}
+
+int security_sb_show_options(struct seq_file *m, struct super_block *sb)
+{
+	return call_int_hook(sb_show_options, 0, m, sb);
+}
+
+int security_sb_statfs(struct dentry *dentry)
+{
+	return call_int_hook(sb_statfs, 0, dentry);
+}
+
+int security_sb_mount(const char *dev_name, struct path *path,
+		      const char *type, unsigned long flags, void *data)
+{
+	return call_int_hook(sb_mount, 0, dev_name, path, type, flags, data);
+}
+
+int security_sb_umount(struct vfsmount *mnt, int flags)
+{
+	return call_int_hook(sb_umount, 0, mnt, flags);
+}
+
+int security_sb_pivotroot(struct path *old_path, struct path *new_path)
+{
+	return call_int_hook(sb_pivotroot, 0, old_path, new_path);
+}
+
+int security_sb_set_mnt_opts(struct super_block *sb,
+				struct security_mnt_opts *opts,
+				unsigned long kern_flags,
+				unsigned long *set_kern_flags)
+{
+	return call_int_hook(sb_set_mnt_opts,
+				opts->num_mnt_opts ? -EOPNOTSUPP : 0, sb,
+				opts, kern_flags, set_kern_flags);
+}
+EXPORT_SYMBOL(security_sb_set_mnt_opts);
+
+int security_sb_clone_mnt_opts(const struct super_block *oldsb,
+				struct super_block *newsb)
+{
+	return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb);
+}
+EXPORT_SYMBOL(security_sb_clone_mnt_opts);
+
+int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+{
+	return call_int_hook(sb_parse_opts_str, 0, options, opts);
+}
+EXPORT_SYMBOL(security_sb_parse_opts_str);
+
+int security_inode_alloc(struct inode *inode)
+{
+	inode->i_security = NULL;
+	return call_int_hook(inode_alloc_security, 0, inode);
+}
+
+void security_inode_free(struct inode *inode)
+{
+	integrity_inode_free(inode);
+	call_void_hook(inode_free_security, inode);
+}
+
+int security_dentry_init_security(struct dentry *dentry, int mode,
+					struct qstr *name, void **ctx,
+					u32 *ctxlen)
+{
+	return call_int_hook(dentry_init_security, -EOPNOTSUPP, dentry, mode,
+				name, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_dentry_init_security);
+
+int security_inode_init_security(struct inode *inode, struct inode *dir,
+				 const struct qstr *qstr,
+				 const initxattrs initxattrs, void *fs_data)
+{
+	struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
+	struct xattr *lsm_xattr, *evm_xattr, *xattr;
+	int ret;
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	if (!initxattrs)
+		return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
+				     dir, qstr, NULL, NULL, NULL);
+	memset(new_xattrs, 0, sizeof(new_xattrs));
+	lsm_xattr = new_xattrs;
+	ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
+						&lsm_xattr->name,
+						&lsm_xattr->value,
+						&lsm_xattr->value_len);
+	if (ret)
+		goto out;
+
+	evm_xattr = lsm_xattr + 1;
+	ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
+	if (ret)
+		goto out;
+	ret = initxattrs(inode, new_xattrs, fs_data);
+out:
+	for (xattr = new_xattrs; xattr->value != NULL; xattr++)
+		kfree(xattr->value);
+	return (ret == -EOPNOTSUPP) ? 0 : ret;
+}
+EXPORT_SYMBOL(security_inode_init_security);
+
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+				     const struct qstr *qstr, const char **name,
+				     void **value, size_t *len)
+{
+	if (unlikely(IS_PRIVATE(inode)))
+		return -EOPNOTSUPP;
+	return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
+			     qstr, name, value, len);
+}
+EXPORT_SYMBOL(security_old_inode_init_security);
+
+#ifdef CONFIG_SECURITY_PATH
+int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
+			unsigned int dev)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+		return 0;
+	return call_int_hook(path_mknod, 0, dir, dentry, mode, dev);
+}
+EXPORT_SYMBOL(security_path_mknod);
+
+int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+		return 0;
+	return call_int_hook(path_mkdir, 0, dir, dentry, mode);
+}
+EXPORT_SYMBOL(security_path_mkdir);
+
+int security_path_rmdir(struct path *dir, struct dentry *dentry)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+		return 0;
+	return call_int_hook(path_rmdir, 0, dir, dentry);
+}
+
+int security_path_unlink(struct path *dir, struct dentry *dentry)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+		return 0;
+	return call_int_hook(path_unlink, 0, dir, dentry);
+}
+EXPORT_SYMBOL(security_path_unlink);
+
+int security_path_symlink(struct path *dir, struct dentry *dentry,
+			  const char *old_name)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+		return 0;
+	return call_int_hook(path_symlink, 0, dir, dentry, old_name);
+}
+
+int security_path_link(struct dentry *old_dentry, struct path *new_dir,
+		       struct dentry *new_dentry)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
+		return 0;
+	return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
+}
+
+int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
+			 struct path *new_dir, struct dentry *new_dentry,
+			 unsigned int flags)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
+		     (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
+		return 0;
+
+	if (flags & RENAME_EXCHANGE) {
+		int err = call_int_hook(path_rename, 0, new_dir, new_dentry,
+					old_dir, old_dentry);
+		if (err)
+			return err;
+	}
+
+	return call_int_hook(path_rename, 0, old_dir, old_dentry, new_dir,
+				new_dentry);
+}
+EXPORT_SYMBOL(security_path_rename);
+
+int security_path_truncate(struct path *path)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
+		return 0;
+	return call_int_hook(path_truncate, 0, path);
+}
+
+int security_path_chmod(struct path *path, umode_t mode)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
+		return 0;
+	return call_int_hook(path_chmod, 0, path, mode);
+}
+
+int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
+		return 0;
+	return call_int_hook(path_chown, 0, path, uid, gid);
+}
+
+int security_path_chroot(struct path *path)
+{
+	return call_int_hook(path_chroot, 0, path);
+}
+#endif
+
+int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	if (unlikely(IS_PRIVATE(dir)))
+		return 0;
+	return call_int_hook(inode_create, 0, dir, dentry, mode);
+}
+EXPORT_SYMBOL_GPL(security_inode_create);
+
+int security_inode_link(struct dentry *old_dentry, struct inode *dir,
+			 struct dentry *new_dentry)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
+		return 0;
+	return call_int_hook(inode_link, 0, old_dentry, dir, new_dentry);
+}
+
+int security_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return 0;
+	return call_int_hook(inode_unlink, 0, dir, dentry);
+}
+
+int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+			    const char *old_name)
+{
+	if (unlikely(IS_PRIVATE(dir)))
+		return 0;
+	return call_int_hook(inode_symlink, 0, dir, dentry, old_name);
+}
+
+int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	if (unlikely(IS_PRIVATE(dir)))
+		return 0;
+	return call_int_hook(inode_mkdir, 0, dir, dentry, mode);
+}
+EXPORT_SYMBOL_GPL(security_inode_mkdir);
+
+int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return 0;
+	return call_int_hook(inode_rmdir, 0, dir, dentry);
+}
+
+int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+{
+	if (unlikely(IS_PRIVATE(dir)))
+		return 0;
+	return call_int_hook(inode_mknod, 0, dir, dentry, mode, dev);
+}
+
+int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+			   struct inode *new_dir, struct dentry *new_dentry,
+			   unsigned int flags)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
+	    (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
+		return 0;
+
+	if (flags & RENAME_EXCHANGE) {
+		int err = call_int_hook(inode_rename, 0, new_dir, new_dentry,
+						     old_dir, old_dentry);
+		if (err)
+			return err;
+	}
+
+	return call_int_hook(inode_rename, 0, old_dir, old_dentry,
+					   new_dir, new_dentry);
+}
+
+int security_inode_readlink(struct dentry *dentry)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return 0;
+	return call_int_hook(inode_readlink, 0, dentry);
+}
+
+int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
+			       bool rcu)
+{
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+	return call_int_hook(inode_follow_link, 0, dentry, inode, rcu);
+}
+
+int security_inode_permission(struct inode *inode, int mask)
+{
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+	return call_int_hook(inode_permission, 0, inode, mask);
+}
+
+int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	int ret;
+
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return 0;
+	ret = call_int_hook(inode_setattr, 0, dentry, attr);
+	if (ret)
+		return ret;
+	return evm_inode_setattr(dentry, attr);
+}
+EXPORT_SYMBOL_GPL(security_inode_setattr);
+
+int security_inode_getattr(const struct path *path)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
+		return 0;
+	return call_int_hook(inode_getattr, 0, path);
+}
+
+int security_inode_setxattr(struct dentry *dentry, const char *name,
+			    const void *value, size_t size, int flags)
+{
+	int ret;
+
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return 0;
+	/*
+	 * SELinux and Smack integrate the cap call,
+	 * so assume that all LSMs supplying this call do so.
+	 */
+	ret = call_int_hook(inode_setxattr, 1, dentry, name, value, size,
+				flags);
+
+	if (ret == 1)
+		ret = cap_inode_setxattr(dentry, name, value, size, flags);
+	if (ret)
+		return ret;
+	ret = ima_inode_setxattr(dentry, name, value, size);
+	if (ret)
+		return ret;
+	return evm_inode_setxattr(dentry, name, value, size);
+}
+
+void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+				  const void *value, size_t size, int flags)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return;
+	call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
+	evm_inode_post_setxattr(dentry, name, value, size);
+}
+
+int security_inode_getxattr(struct dentry *dentry, const char *name)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return 0;
+	return call_int_hook(inode_getxattr, 0, dentry, name);
+}
+
+int security_inode_listxattr(struct dentry *dentry)
+{
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return 0;
+	return call_int_hook(inode_listxattr, 0, dentry);
+}
+
+int security_inode_removexattr(struct dentry *dentry, const char *name)
+{
+	int ret;
+
+	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+		return 0;
+	/*
+	 * SELinux and Smack integrate the cap call,
+	 * so assume that all LSMs supplying this call do so.
+	 */
+	ret = call_int_hook(inode_removexattr, 1, dentry, name);
+	if (ret == 1)
+		ret = cap_inode_removexattr(dentry, name);
+	if (ret)
+		return ret;
+	ret = ima_inode_removexattr(dentry, name);
+	if (ret)
+		return ret;
+	return evm_inode_removexattr(dentry, name);
+}
+
+int security_inode_need_killpriv(struct dentry *dentry)
+{
+	return call_int_hook(inode_need_killpriv, 0, dentry);
+}
+
+int security_inode_killpriv(struct dentry *dentry)
+{
+	return call_int_hook(inode_killpriv, 0, dentry);
+}
+
+int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
+{
+	if (unlikely(IS_PRIVATE(inode)))
+		return -EOPNOTSUPP;
+	return call_int_hook(inode_getsecurity, -EOPNOTSUPP, inode, name,
+				buffer, alloc);
+}
+
+int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
+{
+	if (unlikely(IS_PRIVATE(inode)))
+		return -EOPNOTSUPP;
+	return call_int_hook(inode_setsecurity, -EOPNOTSUPP, inode, name,
+				value, size, flags);
+}
+
+int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
+{
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+	return call_int_hook(inode_listsecurity, 0, inode, buffer, buffer_size);
+}
+EXPORT_SYMBOL(security_inode_listsecurity);
+
+void security_inode_getsecid(const struct inode *inode, u32 *secid)
+{
+	call_void_hook(inode_getsecid, inode, secid);
+}
+
+int security_file_permission(struct file *file, int mask)
+{
+	int ret;
+
+	ret = call_int_hook(file_permission, 0, file, mask);
+	if (ret)
+		return ret;
+
+	return fsnotify_perm(file, mask);
+}
+
+int security_file_alloc(struct file *file)
+{
+	return call_int_hook(file_alloc_security, 0, file);
+}
+
+void security_file_free(struct file *file)
+{
+	call_void_hook(file_free_security, file);
+}
+
+int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	return call_int_hook(file_ioctl, 0, file, cmd, arg);
+}
+
+static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
+{
+	/*
+	 * Do we have PROT_READ and does the application expect
+	 * it to imply PROT_EXEC?  If not, nothing to talk about...
+	 */
+	if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
+		return prot;
+	if (!(current->personality & READ_IMPLIES_EXEC))
+		return prot;
+	/*
+	 * If that's an anonymous mapping, allow it.
+	 */
+	if (!file)
+		return prot | PROT_EXEC;
+	/*
+	 * Ditto if it's not on a noexec mount, except that on !MMU we need
+	 * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case.
+	 */
+	if (!path_noexec(&file->f_path)) {
+#ifndef CONFIG_MMU
+		if (file->f_op->mmap_capabilities) {
+			unsigned caps = file->f_op->mmap_capabilities(file);
+			if (!(caps & NOMMU_MAP_EXEC))
+				return prot;
+		}
+#endif
+		return prot | PROT_EXEC;
+	}
+	/* anything on noexec mount won't get PROT_EXEC */
+	return prot;
+}
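+
+/*
+ * Example (illustrative): a process with the READ_IMPLIES_EXEC
+ * personality mapping a file PROT_READ from an exec-capable mount gets
+ * upgraded to PROT_READ|PROT_EXEC before the mmap_file hooks see it;
+ * the same mapping from a noexec mount keeps plain PROT_READ.
+ */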
+
+int security_mmap_file(struct file *file, unsigned long prot,
+			unsigned long flags)
+{
+	int ret;
+	ret = call_int_hook(mmap_file, 0, file, prot,
+					mmap_prot(file, prot), flags);
+	if (ret)
+		return ret;
+	return ima_file_mmap(file, prot);
+}
+
+int security_mmap_addr(unsigned long addr)
+{
+	return call_int_hook(mmap_addr, 0, addr);
+}
+
+int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
+			    unsigned long prot)
+{
+	return call_int_hook(file_mprotect, 0, vma, reqprot, prot);
+}
+
+int security_file_lock(struct file *file, unsigned int cmd)
+{
+	return call_int_hook(file_lock, 0, file, cmd);
+}
+
+int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	return call_int_hook(file_fcntl, 0, file, cmd, arg);
+}
+
+void security_file_set_fowner(struct file *file)
+{
+	call_void_hook(file_set_fowner, file);
+}
+
+int security_file_send_sigiotask(struct task_struct *tsk,
+				  struct fown_struct *fown, int sig)
+{
+	return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
+}
+
+int security_file_receive(struct file *file)
+{
+	return call_int_hook(file_receive, 0, file);
+}
+
+int security_file_open(struct file *file, const struct cred *cred)
+{
+	int ret;
+
+	ret = call_int_hook(file_open, 0, file, cred);
+	if (ret)
+		return ret;
+
+	return fsnotify_perm(file, MAY_OPEN);
+}
+
+int security_task_create(unsigned long clone_flags)
+{
+	return call_int_hook(task_create, 0, clone_flags);
+}
+
+void security_task_free(struct task_struct *task)
+{
+	call_void_hook(task_free, task);
+}
+
+int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+	return call_int_hook(cred_alloc_blank, 0, cred, gfp);
+}
+
+void security_cred_free(struct cred *cred)
+{
+	call_void_hook(cred_free, cred);
+}
+
+int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
+{
+	return call_int_hook(cred_prepare, 0, new, old, gfp);
+}
+
+void security_transfer_creds(struct cred *new, const struct cred *old)
+{
+	call_void_hook(cred_transfer, new, old);
+}
+
+int security_kernel_act_as(struct cred *new, u32 secid)
+{
+	return call_int_hook(kernel_act_as, 0, new, secid);
+}
+
+int security_kernel_create_files_as(struct cred *new, struct inode *inode)
+{
+	return call_int_hook(kernel_create_files_as, 0, new, inode);
+}
+
+int security_kernel_fw_from_file(struct file *file, char *buf, size_t size)
+{
+	int ret;
+
+	ret = call_int_hook(kernel_fw_from_file, 0, file, buf, size);
+	if (ret)
+		return ret;
+	return ima_fw_from_file(file, buf, size);
+}
+EXPORT_SYMBOL_GPL(security_kernel_fw_from_file);
+
+int security_kernel_module_request(char *kmod_name)
+{
+	return call_int_hook(kernel_module_request, 0, kmod_name);
+}
+
+int security_kernel_module_from_file(struct file *file)
+{
+	int ret;
+
+	ret = call_int_hook(kernel_module_from_file, 0, file);
+	if (ret)
+		return ret;
+	return ima_module_check(file);
+}
+
+int security_task_fix_setuid(struct cred *new, const struct cred *old,
+			     int flags)
+{
+	return call_int_hook(task_fix_setuid, 0, new, old, flags);
+}
+
+int security_task_setpgid(struct task_struct *p, pid_t pgid)
+{
+	return call_int_hook(task_setpgid, 0, p, pgid);
+}
+
+int security_task_getpgid(struct task_struct *p)
+{
+	return call_int_hook(task_getpgid, 0, p);
+}
+
+int security_task_getsid(struct task_struct *p)
+{
+	return call_int_hook(task_getsid, 0, p);
+}
+
+void security_task_getsecid(struct task_struct *p, u32 *secid)
+{
+	*secid = 0;
+	call_void_hook(task_getsecid, p, secid);
+}
+EXPORT_SYMBOL(security_task_getsecid);
+
+int security_task_setnice(struct task_struct *p, int nice)
+{
+	return call_int_hook(task_setnice, 0, p, nice);
+}
+
+int security_task_setioprio(struct task_struct *p, int ioprio)
+{
+	return call_int_hook(task_setioprio, 0, p, ioprio);
+}
+
+int security_task_getioprio(struct task_struct *p)
+{
+	return call_int_hook(task_getioprio, 0, p);
+}
+
+int security_task_setrlimit(struct task_struct *p, unsigned int resource,
+		struct rlimit *new_rlim)
+{
+	return call_int_hook(task_setrlimit, 0, p, resource, new_rlim);
+}
+
+int security_task_setscheduler(struct task_struct *p)
+{
+	return call_int_hook(task_setscheduler, 0, p);
+}
+
+int security_task_getscheduler(struct task_struct *p)
+{
+	return call_int_hook(task_getscheduler, 0, p);
+}
+
+int security_task_movememory(struct task_struct *p)
+{
+	return call_int_hook(task_movememory, 0, p);
+}
+
+int security_task_kill(struct task_struct *p, struct siginfo *info,
+			int sig, u32 secid)
+{
+	return call_int_hook(task_kill, 0, p, info, sig, secid);
+}
+
+int security_task_wait(struct task_struct *p)
+{
+	return call_int_hook(task_wait, 0, p);
+}
+
+int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+			 unsigned long arg4, unsigned long arg5)
+{
+	int thisrc;
+	int rc = -ENOSYS;
+	struct security_hook_list *hp;
+
+	list_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
+		thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
+		if (thisrc != -ENOSYS) {
+			rc = thisrc;
+			if (thisrc != 0)
+				break;
+		}
+	}
+	return rc;
+}
+
+void security_task_to_inode(struct task_struct *p, struct inode *inode)
+{
+	call_void_hook(task_to_inode, p, inode);
+}
+
+int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
+{
+	return call_int_hook(ipc_permission, 0, ipcp, flag);
+}
+
+void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+{
+	*secid = 0;
+	call_void_hook(ipc_getsecid, ipcp, secid);
+}
+
+int security_msg_msg_alloc(struct msg_msg *msg)
+{
+	return call_int_hook(msg_msg_alloc_security, 0, msg);
+}
+
+void security_msg_msg_free(struct msg_msg *msg)
+{
+	call_void_hook(msg_msg_free_security, msg);
+}
+
+int security_msg_queue_alloc(struct msg_queue *msq)
+{
+	return call_int_hook(msg_queue_alloc_security, 0, msq);
+}
+
+void security_msg_queue_free(struct msg_queue *msq)
+{
+	call_void_hook(msg_queue_free_security, msq);
+}
+
+int security_msg_queue_associate(struct msg_queue *msq, int msqflg)
+{
+	return call_int_hook(msg_queue_associate, 0, msq, msqflg);
+}
+
+int security_msg_queue_msgctl(struct msg_queue *msq, int cmd)
+{
+	return call_int_hook(msg_queue_msgctl, 0, msq, cmd);
+}
+
+int security_msg_queue_msgsnd(struct msg_queue *msq,
+			       struct msg_msg *msg, int msqflg)
+{
+	return call_int_hook(msg_queue_msgsnd, 0, msq, msg, msqflg);
+}
+
+int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
+			       struct task_struct *target, long type, int mode)
+{
+	return call_int_hook(msg_queue_msgrcv, 0, msq, msg, target, type, mode);
+}
+
+int security_shm_alloc(struct shmid_kernel *shp)
+{
+	return call_int_hook(shm_alloc_security, 0, shp);
+}
+
+void security_shm_free(struct shmid_kernel *shp)
+{
+	call_void_hook(shm_free_security, shp);
+}
+
+int security_shm_associate(struct shmid_kernel *shp, int shmflg)
+{
+	return call_int_hook(shm_associate, 0, shp, shmflg);
+}
+
+int security_shm_shmctl(struct shmid_kernel *shp, int cmd)
+{
+	return call_int_hook(shm_shmctl, 0, shp, cmd);
+}
+
+int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg)
+{
+	return call_int_hook(shm_shmat, 0, shp, shmaddr, shmflg);
+}
+
+int security_sem_alloc(struct sem_array *sma)
+{
+	return call_int_hook(sem_alloc_security, 0, sma);
+}
+
+void security_sem_free(struct sem_array *sma)
+{
+	call_void_hook(sem_free_security, sma);
+}
+
+int security_sem_associate(struct sem_array *sma, int semflg)
+{
+	return call_int_hook(sem_associate, 0, sma, semflg);
+}
+
+int security_sem_semctl(struct sem_array *sma, int cmd)
+{
+	return call_int_hook(sem_semctl, 0, sma, cmd);
+}
+
+int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
+			unsigned nsops, int alter)
+{
+	return call_int_hook(sem_semop, 0, sma, sops, nsops, alter);
+}
+
+void security_d_instantiate(struct dentry *dentry, struct inode *inode)
+{
+	if (unlikely(inode && IS_PRIVATE(inode)))
+		return;
+	call_void_hook(d_instantiate, dentry, inode);
+}
+EXPORT_SYMBOL(security_d_instantiate);
+
+int security_getprocattr(struct task_struct *p, char *name, char **value)
+{
+	return call_int_hook(getprocattr, -EINVAL, p, name, value);
+}
+
+int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size)
+{
+	return call_int_hook(setprocattr, -EINVAL, p, name, value, size);
+}
+
+int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+	return call_int_hook(netlink_send, 0, sk, skb);
+}
+
+int security_ismaclabel(const char *name)
+{
+	return call_int_hook(ismaclabel, 0, name);
+}
+EXPORT_SYMBOL(security_ismaclabel);
+
+int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+	return call_int_hook(secid_to_secctx, -EOPNOTSUPP, secid, secdata,
+				seclen);
+}
+EXPORT_SYMBOL(security_secid_to_secctx);
+
+int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
+{
+	*secid = 0;
+	return call_int_hook(secctx_to_secid, 0, secdata, seclen, secid);
+}
+EXPORT_SYMBOL(security_secctx_to_secid);
+
+void security_release_secctx(char *secdata, u32 seclen)
+{
+	call_void_hook(release_secctx, secdata, seclen);
+}
+EXPORT_SYMBOL(security_release_secctx);
+
+int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+	return call_int_hook(inode_notifysecctx, 0, inode, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_notifysecctx);
+
+int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+	return call_int_hook(inode_setsecctx, 0, dentry, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_setsecctx);
+
+int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+	return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_getsecctx);
+
+#ifdef CONFIG_SECURITY_NETWORK
+
+int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
+{
+	return call_int_hook(unix_stream_connect, 0, sock, other, newsk);
+}
+EXPORT_SYMBOL(security_unix_stream_connect);
+
+int security_unix_may_send(struct socket *sock,  struct socket *other)
+{
+	return call_int_hook(unix_may_send, 0, sock, other);
+}
+EXPORT_SYMBOL(security_unix_may_send);
+
+int security_socket_create(int family, int type, int protocol, int kern)
+{
+	return call_int_hook(socket_create, 0, family, type, protocol, kern);
+}
+
+int security_socket_post_create(struct socket *sock, int family,
+				int type, int protocol, int kern)
+{
+	return call_int_hook(socket_post_create, 0, sock, family, type,
+						protocol, kern);
+}
+
+int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
+{
+	return call_int_hook(socket_bind, 0, sock, address, addrlen);
+}
+
+int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
+{
+	return call_int_hook(socket_connect, 0, sock, address, addrlen);
+}
+
+int security_socket_listen(struct socket *sock, int backlog)
+{
+	return call_int_hook(socket_listen, 0, sock, backlog);
+}
+
+int security_socket_accept(struct socket *sock, struct socket *newsock)
+{
+	return call_int_hook(socket_accept, 0, sock, newsock);
+}
+
+int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
+{
+	return call_int_hook(socket_sendmsg, 0, sock, msg, size);
+}
+
+int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
+			    int size, int flags)
+{
+	return call_int_hook(socket_recvmsg, 0, sock, msg, size, flags);
+}
+
+int security_socket_getsockname(struct socket *sock)
+{
+	return call_int_hook(socket_getsockname, 0, sock);
+}
+
+int security_socket_getpeername(struct socket *sock)
+{
+	return call_int_hook(socket_getpeername, 0, sock);
+}
+
+int security_socket_getsockopt(struct socket *sock, int level, int optname)
+{
+	return call_int_hook(socket_getsockopt, 0, sock, level, optname);
+}
+
+int security_socket_setsockopt(struct socket *sock, int level, int optname)
+{
+	return call_int_hook(socket_setsockopt, 0, sock, level, optname);
+}
+
+int security_socket_shutdown(struct socket *sock, int how)
+{
+	return call_int_hook(socket_shutdown, 0, sock, how);
+}
+
+int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
+}
+EXPORT_SYMBOL(security_sock_rcv_skb);
+
+int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
+				      int __user *optlen, unsigned len)
+{
+	return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
+				optval, optlen, len);
+}
+
+int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
+{
+	return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
+			     skb, secid);
+}
+EXPORT_SYMBOL(security_socket_getpeersec_dgram);
+
+int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
+{
+	return call_int_hook(sk_alloc_security, 0, sk, family, priority);
+}
+
+void security_sk_free(struct sock *sk)
+{
+	call_void_hook(sk_free_security, sk);
+}
+
+void security_sk_clone(const struct sock *sk, struct sock *newsk)
+{
+	call_void_hook(sk_clone_security, sk, newsk);
+}
+EXPORT_SYMBOL(security_sk_clone);
+
+void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+{
+	call_void_hook(sk_getsecid, sk, &fl->flowi_secid);
+}
+EXPORT_SYMBOL(security_sk_classify_flow);
+
+void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+{
+	call_void_hook(req_classify_flow, req, fl);
+}
+EXPORT_SYMBOL(security_req_classify_flow);
+
+void security_sock_graft(struct sock *sk, struct socket *parent)
+{
+	call_void_hook(sock_graft, sk, parent);
+}
+EXPORT_SYMBOL(security_sock_graft);
+
+int security_inet_conn_request(struct sock *sk,
+			struct sk_buff *skb, struct request_sock *req)
+{
+	return call_int_hook(inet_conn_request, 0, sk, skb, req);
+}
+EXPORT_SYMBOL(security_inet_conn_request);
+
+void security_inet_csk_clone(struct sock *newsk,
+			const struct request_sock *req)
+{
+	call_void_hook(inet_csk_clone, newsk, req);
+}
+
+void security_inet_conn_established(struct sock *sk,
+			struct sk_buff *skb)
+{
+	call_void_hook(inet_conn_established, sk, skb);
+}
+
+int security_secmark_relabel_packet(u32 secid)
+{
+	return call_int_hook(secmark_relabel_packet, 0, secid);
+}
+EXPORT_SYMBOL(security_secmark_relabel_packet);
+
+void security_secmark_refcount_inc(void)
+{
+	call_void_hook(secmark_refcount_inc);
+}
+EXPORT_SYMBOL(security_secmark_refcount_inc);
+
+void security_secmark_refcount_dec(void)
+{
+	call_void_hook(secmark_refcount_dec);
+}
+EXPORT_SYMBOL(security_secmark_refcount_dec);
+
+int security_tun_dev_alloc_security(void **security)
+{
+	return call_int_hook(tun_dev_alloc_security, 0, security);
+}
+EXPORT_SYMBOL(security_tun_dev_alloc_security);
+
+void security_tun_dev_free_security(void *security)
+{
+	call_void_hook(tun_dev_free_security, security);
+}
+EXPORT_SYMBOL(security_tun_dev_free_security);
+
+int security_tun_dev_create(void)
+{
+	return call_int_hook(tun_dev_create, 0);
+}
+EXPORT_SYMBOL(security_tun_dev_create);
+
+int security_tun_dev_attach_queue(void *security)
+{
+	return call_int_hook(tun_dev_attach_queue, 0, security);
+}
+EXPORT_SYMBOL(security_tun_dev_attach_queue);
+
+int security_tun_dev_attach(struct sock *sk, void *security)
+{
+	return call_int_hook(tun_dev_attach, 0, sk, security);
+}
+EXPORT_SYMBOL(security_tun_dev_attach);
+
+int security_tun_dev_open(void *security)
+{
+	return call_int_hook(tun_dev_open, 0, security);
+}
+EXPORT_SYMBOL(security_tun_dev_open);
+
+#endif	/* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+			       struct xfrm_user_sec_ctx *sec_ctx,
+			       gfp_t gfp)
+{
+	return call_int_hook(xfrm_policy_alloc_security, 0, ctxp, sec_ctx, gfp);
+}
+EXPORT_SYMBOL(security_xfrm_policy_alloc);
+
+int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
+			      struct xfrm_sec_ctx **new_ctxp)
+{
+	return call_int_hook(xfrm_policy_clone_security, 0, old_ctx, new_ctxp);
+}
+
+void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
+{
+	call_void_hook(xfrm_policy_free_security, ctx);
+}
+EXPORT_SYMBOL(security_xfrm_policy_free);
+
+int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
+{
+	return call_int_hook(xfrm_policy_delete_security, 0, ctx);
+}
+
+int security_xfrm_state_alloc(struct xfrm_state *x,
+			      struct xfrm_user_sec_ctx *sec_ctx)
+{
+	return call_int_hook(xfrm_state_alloc, 0, x, sec_ctx);
+}
+EXPORT_SYMBOL(security_xfrm_state_alloc);
+
+int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
+				      struct xfrm_sec_ctx *polsec, u32 secid)
+{
+	return call_int_hook(xfrm_state_alloc_acquire, 0, x, polsec, secid);
+}
+
+int security_xfrm_state_delete(struct xfrm_state *x)
+{
+	return call_int_hook(xfrm_state_delete_security, 0, x);
+}
+EXPORT_SYMBOL(security_xfrm_state_delete);
+
+void security_xfrm_state_free(struct xfrm_state *x)
+{
+	call_void_hook(xfrm_state_free_security, x);
+}
+
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+{
+	return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid, dir);
+}
+
+int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
+				       struct xfrm_policy *xp,
+				       const struct flowi *fl)
+{
+	struct security_hook_list *hp;
+	int rc = 1;
+
+	/*
+	 * Since this function is expected to return 0 or 1, the judgment
+	 * becomes difficult if multiple LSMs supply this call. Fortunately,
+	 * we can use the first LSM's judgment because currently only SELinux
+	 * supplies this call.
+	 *
+	 * For speed optimization, we explicitly break the loop rather than
+	 * using the call_int_hook() macro.
+	 */
+	list_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
+				list) {
+		rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl);
+		break;
+	}
+	return rc;
+}
+
+int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
+{
+	return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
+}
+
+void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+{
+	int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid,
+				0);
+
+	BUG_ON(rc);
+}
+EXPORT_SYMBOL(security_skb_classify_flow);
+
+#endif	/* CONFIG_SECURITY_NETWORK_XFRM */
+
+#ifdef CONFIG_KEYS
+
+int security_key_alloc(struct key *key, const struct cred *cred,
+		       unsigned long flags)
+{
+	return call_int_hook(key_alloc, 0, key, cred, flags);
+}
+
+void security_key_free(struct key *key)
+{
+	call_void_hook(key_free, key);
+}
+
+int security_key_permission(key_ref_t key_ref,
+			    const struct cred *cred, unsigned perm)
+{
+	return call_int_hook(key_permission, 0, key_ref, cred, perm);
+}
+
+int security_key_getsecurity(struct key *key, char **_buffer)
+{
+	*_buffer = NULL;
+	return call_int_hook(key_getsecurity, 0, key, _buffer);
+}
+
+#endif	/* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
+{
+	return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
+}
+
+int security_audit_rule_known(struct audit_krule *krule)
+{
+	return call_int_hook(audit_rule_known, 0, krule);
+}
+
+void security_audit_rule_free(void *lsmrule)
+{
+	call_void_hook(audit_rule_free, lsmrule);
+}
+
+int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule,
+			      struct audit_context *actx)
+{
+	return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule,
+				actx);
+}
+#endif /* CONFIG_AUDIT */
+
+struct security_hook_heads security_hook_heads = {
+	.binder_set_context_mgr =
+		LIST_HEAD_INIT(security_hook_heads.binder_set_context_mgr),
+	.binder_transaction =
+		LIST_HEAD_INIT(security_hook_heads.binder_transaction),
+	.binder_transfer_binder =
+		LIST_HEAD_INIT(security_hook_heads.binder_transfer_binder),
+	.binder_transfer_file =
+		LIST_HEAD_INIT(security_hook_heads.binder_transfer_file),
+
+	.ptrace_access_check =
+		LIST_HEAD_INIT(security_hook_heads.ptrace_access_check),
+	.ptrace_traceme =
+		LIST_HEAD_INIT(security_hook_heads.ptrace_traceme),
+	.capget =	LIST_HEAD_INIT(security_hook_heads.capget),
+	.capset =	LIST_HEAD_INIT(security_hook_heads.capset),
+	.capable =	LIST_HEAD_INIT(security_hook_heads.capable),
+	.quotactl =	LIST_HEAD_INIT(security_hook_heads.quotactl),
+	.quota_on =	LIST_HEAD_INIT(security_hook_heads.quota_on),
+	.syslog =	LIST_HEAD_INIT(security_hook_heads.syslog),
+	.settime =	LIST_HEAD_INIT(security_hook_heads.settime),
+	.vm_enough_memory =
+		LIST_HEAD_INIT(security_hook_heads.vm_enough_memory),
+	.bprm_set_creds =
+		LIST_HEAD_INIT(security_hook_heads.bprm_set_creds),
+	.bprm_check_security =
+		LIST_HEAD_INIT(security_hook_heads.bprm_check_security),
+	.bprm_secureexec =
+		LIST_HEAD_INIT(security_hook_heads.bprm_secureexec),
+	.bprm_committing_creds =
+		LIST_HEAD_INIT(security_hook_heads.bprm_committing_creds),
+	.bprm_committed_creds =
+		LIST_HEAD_INIT(security_hook_heads.bprm_committed_creds),
+	.sb_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.sb_alloc_security),
+	.sb_free_security =
+		LIST_HEAD_INIT(security_hook_heads.sb_free_security),
+	.sb_copy_data =	LIST_HEAD_INIT(security_hook_heads.sb_copy_data),
+	.sb_remount =	LIST_HEAD_INIT(security_hook_heads.sb_remount),
+	.sb_kern_mount =
+		LIST_HEAD_INIT(security_hook_heads.sb_kern_mount),
+	.sb_show_options =
+		LIST_HEAD_INIT(security_hook_heads.sb_show_options),
+	.sb_statfs =	LIST_HEAD_INIT(security_hook_heads.sb_statfs),
+	.sb_mount =	LIST_HEAD_INIT(security_hook_heads.sb_mount),
+	.sb_umount =	LIST_HEAD_INIT(security_hook_heads.sb_umount),
+	.sb_pivotroot =	LIST_HEAD_INIT(security_hook_heads.sb_pivotroot),
+	.sb_set_mnt_opts =
+		LIST_HEAD_INIT(security_hook_heads.sb_set_mnt_opts),
+	.sb_clone_mnt_opts =
+		LIST_HEAD_INIT(security_hook_heads.sb_clone_mnt_opts),
+	.sb_parse_opts_str =
+		LIST_HEAD_INIT(security_hook_heads.sb_parse_opts_str),
+	.dentry_init_security =
+		LIST_HEAD_INIT(security_hook_heads.dentry_init_security),
+#ifdef CONFIG_SECURITY_PATH
+	.path_unlink =	LIST_HEAD_INIT(security_hook_heads.path_unlink),
+	.path_mkdir =	LIST_HEAD_INIT(security_hook_heads.path_mkdir),
+	.path_rmdir =	LIST_HEAD_INIT(security_hook_heads.path_rmdir),
+	.path_mknod =	LIST_HEAD_INIT(security_hook_heads.path_mknod),
+	.path_truncate =
+		LIST_HEAD_INIT(security_hook_heads.path_truncate),
+	.path_symlink =	LIST_HEAD_INIT(security_hook_heads.path_symlink),
+	.path_link =	LIST_HEAD_INIT(security_hook_heads.path_link),
+	.path_rename =	LIST_HEAD_INIT(security_hook_heads.path_rename),
+	.path_chmod =	LIST_HEAD_INIT(security_hook_heads.path_chmod),
+	.path_chown =	LIST_HEAD_INIT(security_hook_heads.path_chown),
+	.path_chroot =	LIST_HEAD_INIT(security_hook_heads.path_chroot),
+#endif
+	.inode_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.inode_alloc_security),
+	.inode_free_security =
+		LIST_HEAD_INIT(security_hook_heads.inode_free_security),
+	.inode_init_security =
+		LIST_HEAD_INIT(security_hook_heads.inode_init_security),
+	.inode_create =	LIST_HEAD_INIT(security_hook_heads.inode_create),
+	.inode_link =	LIST_HEAD_INIT(security_hook_heads.inode_link),
+	.inode_unlink =	LIST_HEAD_INIT(security_hook_heads.inode_unlink),
+	.inode_symlink =
+		LIST_HEAD_INIT(security_hook_heads.inode_symlink),
+	.inode_mkdir =	LIST_HEAD_INIT(security_hook_heads.inode_mkdir),
+	.inode_rmdir =	LIST_HEAD_INIT(security_hook_heads.inode_rmdir),
+	.inode_mknod =	LIST_HEAD_INIT(security_hook_heads.inode_mknod),
+	.inode_rename =	LIST_HEAD_INIT(security_hook_heads.inode_rename),
+	.inode_readlink =
+		LIST_HEAD_INIT(security_hook_heads.inode_readlink),
+	.inode_follow_link =
+		LIST_HEAD_INIT(security_hook_heads.inode_follow_link),
+	.inode_permission =
+		LIST_HEAD_INIT(security_hook_heads.inode_permission),
+	.inode_setattr =
+		LIST_HEAD_INIT(security_hook_heads.inode_setattr),
+	.inode_getattr =
+		LIST_HEAD_INIT(security_hook_heads.inode_getattr),
+	.inode_setxattr =
+		LIST_HEAD_INIT(security_hook_heads.inode_setxattr),
+	.inode_post_setxattr =
+		LIST_HEAD_INIT(security_hook_heads.inode_post_setxattr),
+	.inode_getxattr =
+		LIST_HEAD_INIT(security_hook_heads.inode_getxattr),
+	.inode_listxattr =
+		LIST_HEAD_INIT(security_hook_heads.inode_listxattr),
+	.inode_removexattr =
+		LIST_HEAD_INIT(security_hook_heads.inode_removexattr),
+	.inode_need_killpriv =
+		LIST_HEAD_INIT(security_hook_heads.inode_need_killpriv),
+	.inode_killpriv =
+		LIST_HEAD_INIT(security_hook_heads.inode_killpriv),
+	.inode_getsecurity =
+		LIST_HEAD_INIT(security_hook_heads.inode_getsecurity),
+	.inode_setsecurity =
+		LIST_HEAD_INIT(security_hook_heads.inode_setsecurity),
+	.inode_listsecurity =
+		LIST_HEAD_INIT(security_hook_heads.inode_listsecurity),
+	.inode_getsecid =
+		LIST_HEAD_INIT(security_hook_heads.inode_getsecid),
+	.file_permission =
+		LIST_HEAD_INIT(security_hook_heads.file_permission),
+	.file_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.file_alloc_security),
+	.file_free_security =
+		LIST_HEAD_INIT(security_hook_heads.file_free_security),
+	.file_ioctl =	LIST_HEAD_INIT(security_hook_heads.file_ioctl),
+	.mmap_addr =	LIST_HEAD_INIT(security_hook_heads.mmap_addr),
+	.mmap_file =	LIST_HEAD_INIT(security_hook_heads.mmap_file),
+	.file_mprotect =
+		LIST_HEAD_INIT(security_hook_heads.file_mprotect),
+	.file_lock =	LIST_HEAD_INIT(security_hook_heads.file_lock),
+	.file_fcntl =	LIST_HEAD_INIT(security_hook_heads.file_fcntl),
+	.file_set_fowner =
+		LIST_HEAD_INIT(security_hook_heads.file_set_fowner),
+	.file_send_sigiotask =
+		LIST_HEAD_INIT(security_hook_heads.file_send_sigiotask),
+	.file_receive =	LIST_HEAD_INIT(security_hook_heads.file_receive),
+	.file_open =	LIST_HEAD_INIT(security_hook_heads.file_open),
+	.task_create =	LIST_HEAD_INIT(security_hook_heads.task_create),
+	.task_free =	LIST_HEAD_INIT(security_hook_heads.task_free),
+	.cred_alloc_blank =
+		LIST_HEAD_INIT(security_hook_heads.cred_alloc_blank),
+	.cred_free =	LIST_HEAD_INIT(security_hook_heads.cred_free),
+	.cred_prepare =	LIST_HEAD_INIT(security_hook_heads.cred_prepare),
+	.cred_transfer =
+		LIST_HEAD_INIT(security_hook_heads.cred_transfer),
+	.kernel_act_as =
+		LIST_HEAD_INIT(security_hook_heads.kernel_act_as),
+	.kernel_create_files_as =
+		LIST_HEAD_INIT(security_hook_heads.kernel_create_files_as),
+	.kernel_fw_from_file =
+		LIST_HEAD_INIT(security_hook_heads.kernel_fw_from_file),
+	.kernel_module_request =
+		LIST_HEAD_INIT(security_hook_heads.kernel_module_request),
+	.kernel_module_from_file =
+		LIST_HEAD_INIT(security_hook_heads.kernel_module_from_file),
+	.task_fix_setuid =
+		LIST_HEAD_INIT(security_hook_heads.task_fix_setuid),
+	.task_setpgid =	LIST_HEAD_INIT(security_hook_heads.task_setpgid),
+	.task_getpgid =	LIST_HEAD_INIT(security_hook_heads.task_getpgid),
+	.task_getsid =	LIST_HEAD_INIT(security_hook_heads.task_getsid),
+	.task_getsecid =
+		LIST_HEAD_INIT(security_hook_heads.task_getsecid),
+	.task_setnice =	LIST_HEAD_INIT(security_hook_heads.task_setnice),
+	.task_setioprio =
+		LIST_HEAD_INIT(security_hook_heads.task_setioprio),
+	.task_getioprio =
+		LIST_HEAD_INIT(security_hook_heads.task_getioprio),
+	.task_setrlimit =
+		LIST_HEAD_INIT(security_hook_heads.task_setrlimit),
+	.task_setscheduler =
+		LIST_HEAD_INIT(security_hook_heads.task_setscheduler),
+	.task_getscheduler =
+		LIST_HEAD_INIT(security_hook_heads.task_getscheduler),
+	.task_movememory =
+		LIST_HEAD_INIT(security_hook_heads.task_movememory),
+	.task_kill =	LIST_HEAD_INIT(security_hook_heads.task_kill),
+	.task_wait =	LIST_HEAD_INIT(security_hook_heads.task_wait),
+	.task_prctl =	LIST_HEAD_INIT(security_hook_heads.task_prctl),
+	.task_to_inode =
+		LIST_HEAD_INIT(security_hook_heads.task_to_inode),
+	.ipc_permission =
+		LIST_HEAD_INIT(security_hook_heads.ipc_permission),
+	.ipc_getsecid =	LIST_HEAD_INIT(security_hook_heads.ipc_getsecid),
+	.msg_msg_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.msg_msg_alloc_security),
+	.msg_msg_free_security =
+		LIST_HEAD_INIT(security_hook_heads.msg_msg_free_security),
+	.msg_queue_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.msg_queue_alloc_security),
+	.msg_queue_free_security =
+		LIST_HEAD_INIT(security_hook_heads.msg_queue_free_security),
+	.msg_queue_associate =
+		LIST_HEAD_INIT(security_hook_heads.msg_queue_associate),
+	.msg_queue_msgctl =
+		LIST_HEAD_INIT(security_hook_heads.msg_queue_msgctl),
+	.msg_queue_msgsnd =
+		LIST_HEAD_INIT(security_hook_heads.msg_queue_msgsnd),
+	.msg_queue_msgrcv =
+		LIST_HEAD_INIT(security_hook_heads.msg_queue_msgrcv),
+	.shm_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.shm_alloc_security),
+	.shm_free_security =
+		LIST_HEAD_INIT(security_hook_heads.shm_free_security),
+	.shm_associate =
+		LIST_HEAD_INIT(security_hook_heads.shm_associate),
+	.shm_shmctl =	LIST_HEAD_INIT(security_hook_heads.shm_shmctl),
+	.shm_shmat =	LIST_HEAD_INIT(security_hook_heads.shm_shmat),
+	.sem_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.sem_alloc_security),
+	.sem_free_security =
+		LIST_HEAD_INIT(security_hook_heads.sem_free_security),
+	.sem_associate =
+		LIST_HEAD_INIT(security_hook_heads.sem_associate),
+	.sem_semctl =	LIST_HEAD_INIT(security_hook_heads.sem_semctl),
+	.sem_semop =	LIST_HEAD_INIT(security_hook_heads.sem_semop),
+	.netlink_send =	LIST_HEAD_INIT(security_hook_heads.netlink_send),
+	.d_instantiate =
+		LIST_HEAD_INIT(security_hook_heads.d_instantiate),
+	.getprocattr =	LIST_HEAD_INIT(security_hook_heads.getprocattr),
+	.setprocattr =	LIST_HEAD_INIT(security_hook_heads.setprocattr),
+	.ismaclabel =	LIST_HEAD_INIT(security_hook_heads.ismaclabel),
+	.secid_to_secctx =
+		LIST_HEAD_INIT(security_hook_heads.secid_to_secctx),
+	.secctx_to_secid =
+		LIST_HEAD_INIT(security_hook_heads.secctx_to_secid),
+	.release_secctx =
+		LIST_HEAD_INIT(security_hook_heads.release_secctx),
+	.inode_notifysecctx =
+		LIST_HEAD_INIT(security_hook_heads.inode_notifysecctx),
+	.inode_setsecctx =
+		LIST_HEAD_INIT(security_hook_heads.inode_setsecctx),
+	.inode_getsecctx =
+		LIST_HEAD_INIT(security_hook_heads.inode_getsecctx),
+#ifdef CONFIG_SECURITY_NETWORK
+	.unix_stream_connect =
+		LIST_HEAD_INIT(security_hook_heads.unix_stream_connect),
+	.unix_may_send =
+		LIST_HEAD_INIT(security_hook_heads.unix_may_send),
+	.socket_create =
+		LIST_HEAD_INIT(security_hook_heads.socket_create),
+	.socket_post_create =
+		LIST_HEAD_INIT(security_hook_heads.socket_post_create),
+	.socket_bind =	LIST_HEAD_INIT(security_hook_heads.socket_bind),
+	.socket_connect =
+		LIST_HEAD_INIT(security_hook_heads.socket_connect),
+	.socket_listen =
+		LIST_HEAD_INIT(security_hook_heads.socket_listen),
+	.socket_accept =
+		LIST_HEAD_INIT(security_hook_heads.socket_accept),
+	.socket_sendmsg =
+		LIST_HEAD_INIT(security_hook_heads.socket_sendmsg),
+	.socket_recvmsg =
+		LIST_HEAD_INIT(security_hook_heads.socket_recvmsg),
+	.socket_getsockname =
+		LIST_HEAD_INIT(security_hook_heads.socket_getsockname),
+	.socket_getpeername =
+		LIST_HEAD_INIT(security_hook_heads.socket_getpeername),
+	.socket_getsockopt =
+		LIST_HEAD_INIT(security_hook_heads.socket_getsockopt),
+	.socket_setsockopt =
+		LIST_HEAD_INIT(security_hook_heads.socket_setsockopt),
+	.socket_shutdown =
+		LIST_HEAD_INIT(security_hook_heads.socket_shutdown),
+	.socket_sock_rcv_skb =
+		LIST_HEAD_INIT(security_hook_heads.socket_sock_rcv_skb),
+	.socket_getpeersec_stream =
+		LIST_HEAD_INIT(security_hook_heads.socket_getpeersec_stream),
+	.socket_getpeersec_dgram =
+		LIST_HEAD_INIT(security_hook_heads.socket_getpeersec_dgram),
+	.sk_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.sk_alloc_security),
+	.sk_free_security =
+		LIST_HEAD_INIT(security_hook_heads.sk_free_security),
+	.sk_clone_security =
+		LIST_HEAD_INIT(security_hook_heads.sk_clone_security),
+	.sk_getsecid =	LIST_HEAD_INIT(security_hook_heads.sk_getsecid),
+	.sock_graft =	LIST_HEAD_INIT(security_hook_heads.sock_graft),
+	.inet_conn_request =
+		LIST_HEAD_INIT(security_hook_heads.inet_conn_request),
+	.inet_csk_clone =
+		LIST_HEAD_INIT(security_hook_heads.inet_csk_clone),
+	.inet_conn_established =
+		LIST_HEAD_INIT(security_hook_heads.inet_conn_established),
+	.secmark_relabel_packet =
+		LIST_HEAD_INIT(security_hook_heads.secmark_relabel_packet),
+	.secmark_refcount_inc =
+		LIST_HEAD_INIT(security_hook_heads.secmark_refcount_inc),
+	.secmark_refcount_dec =
+		LIST_HEAD_INIT(security_hook_heads.secmark_refcount_dec),
+	.req_classify_flow =
+		LIST_HEAD_INIT(security_hook_heads.req_classify_flow),
+	.tun_dev_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.tun_dev_alloc_security),
+	.tun_dev_free_security =
+		LIST_HEAD_INIT(security_hook_heads.tun_dev_free_security),
+	.tun_dev_create =
+		LIST_HEAD_INIT(security_hook_heads.tun_dev_create),
+	.tun_dev_attach_queue =
+		LIST_HEAD_INIT(security_hook_heads.tun_dev_attach_queue),
+	.tun_dev_attach =
+		LIST_HEAD_INIT(security_hook_heads.tun_dev_attach),
+	.tun_dev_open =	LIST_HEAD_INIT(security_hook_heads.tun_dev_open),
+	.skb_owned_by =	LIST_HEAD_INIT(security_hook_heads.skb_owned_by),
+#endif	/* CONFIG_SECURITY_NETWORK */
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+	.xfrm_policy_alloc_security =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_policy_alloc_security),
+	.xfrm_policy_clone_security =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_policy_clone_security),
+	.xfrm_policy_free_security =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_policy_free_security),
+	.xfrm_policy_delete_security =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_policy_delete_security),
+	.xfrm_state_alloc =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_state_alloc),
+	.xfrm_state_alloc_acquire =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_state_alloc_acquire),
+	.xfrm_state_free_security =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_state_free_security),
+	.xfrm_state_delete_security =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_state_delete_security),
+	.xfrm_policy_lookup =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_policy_lookup),
+	.xfrm_state_pol_flow_match =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_state_pol_flow_match),
+	.xfrm_decode_session =
+		LIST_HEAD_INIT(security_hook_heads.xfrm_decode_session),
+#endif	/* CONFIG_SECURITY_NETWORK_XFRM */
+#ifdef CONFIG_KEYS
+	.key_alloc =	LIST_HEAD_INIT(security_hook_heads.key_alloc),
+	.key_free =	LIST_HEAD_INIT(security_hook_heads.key_free),
+	.key_permission =
+		LIST_HEAD_INIT(security_hook_heads.key_permission),
+	.key_getsecurity =
+		LIST_HEAD_INIT(security_hook_heads.key_getsecurity),
+#endif	/* CONFIG_KEYS */
+#ifdef CONFIG_AUDIT
+	.audit_rule_init =
+		LIST_HEAD_INIT(security_hook_heads.audit_rule_init),
+	.audit_rule_known =
+		LIST_HEAD_INIT(security_hook_heads.audit_rule_known),
+	.audit_rule_match =
+		LIST_HEAD_INIT(security_hook_heads.audit_rule_match),
+	.audit_rule_free =
+		LIST_HEAD_INIT(security_hook_heads.audit_rule_free),
+#endif /* CONFIG_AUDIT */
+};
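
Each field in the initializer above is the list head for one LSM hook;
a security module chains its own implementations onto these heads at
boot.  As a rough sketch against this 4.4-era API (hypothetical module,
not part of this tree), a registration looks like:

	#include <linux/lsm_hooks.h>

	/* hypothetical hook: allow every file access */
	static int demo_file_permission(struct file *file, int mask)
	{
		return 0;
	}

	static struct security_hook_list demo_hooks[] = {
		/* LSM_HOOK_INIT ties a function to its security_hook_heads entry */
		LSM_HOOK_INIT(file_permission, demo_file_permission),
	};

	static __init int demo_lsm_init(void)
	{
		security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks));
		return 0;
	}
	security_initcall(demo_lsm_init);

SELinux registers its full hook table the same way from hooks.c below.
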
diff --git a/security/selinux/.gitignore b/security/selinux/.gitignore
new file mode 100644
index 0000000..2e5040a
--- /dev/null
+++ b/security/selinux/.gitignore
@@ -0,0 +1,2 @@
+av_permissions.h
+flask.h
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
new file mode 100644
index 0000000..8691e92
--- /dev/null
+++ b/security/selinux/Kconfig
@@ -0,0 +1,133 @@
+config SECURITY_SELINUX
+	bool "NSA SELinux Support"
+	depends on SECURITY_NETWORK && AUDIT && NET && INET
+	select NETWORK_SECMARK
+	default n
+	help
+	  This selects NSA Security-Enhanced Linux (SELinux).
+	  You will also need a policy configuration and a labeled filesystem.
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_SELINUX_BOOTPARAM
+	bool "NSA SELinux boot parameter"
+	depends on SECURITY_SELINUX
+	default n
+	help
+	  This option adds a kernel parameter 'selinux', which allows SELinux
+	  to be disabled at boot.  If this option is selected, SELinux
+	  functionality can be disabled with selinux=0 on the kernel
+	  command line.  The purpose of this option is to allow a single
+	  kernel image to be distributed with SELinux built in, but not
+	  necessarily enabled.
+
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_SELINUX_BOOTPARAM_VALUE
+	int "NSA SELinux boot parameter default value"
+	depends on SECURITY_SELINUX_BOOTPARAM
+	range 0 1
+	default 1
+	help
+	  This option sets the default value for the kernel parameter
+	  'selinux', which allows SELinux to be disabled at boot.  If this
+	  option is set to 0 (zero), the SELinux kernel parameter will
+	  default to 0, disabling SELinux at bootup.  If this option is
+	  set to 1 (one), the SELinux kernel parameter will default to 1,
+	  enabling SELinux at bootup.
+
+	  If you are unsure how to answer this question, answer 1.
+
+config SECURITY_SELINUX_DISABLE
+	bool "NSA SELinux runtime disable"
+	depends on SECURITY_SELINUX
+	default n
+	help
+	  This option enables writing to a selinuxfs node 'disable', which
+	  allows SELinux to be disabled at runtime prior to the policy load.
+	  SELinux will then remain disabled until the next boot.
+	  This option is similar to the selinux=0 boot parameter, but exists
+	  to support runtime disabling of SELinux, e.g. from /sbin/init, for
+	  portability across platforms where boot parameters are difficult
+	  to employ.
+
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_SELINUX_DEVELOP
+	bool "NSA SELinux Development Support"
+	depends on SECURITY_SELINUX
+	default y
+	help
+	  This enables the development support option of NSA SELinux,
+	  which is useful for experimenting with SELinux and developing
+	  policies.  If unsure, say Y.  With this option enabled, the
+	  kernel will start in permissive mode (log everything, deny nothing)
+	  unless you specify enforcing=1 on the kernel command line.  You
+	  can interactively toggle the kernel between enforcing mode and
+	  permissive mode (if permitted by the policy) via /selinux/enforce.
+
+config SECURITY_SELINUX_AVC_STATS
+	bool "NSA SELinux AVC Statistics"
+	depends on SECURITY_SELINUX
+	default y
+	help
+	  This option collects access vector cache statistics to
+	  /selinux/avc/cache_stats, which may be monitored via
+	  tools such as avcstat.
+
+config SECURITY_SELINUX_CHECKREQPROT_VALUE
+	int "NSA SELinux checkreqprot default value"
+	depends on SECURITY_SELINUX
+	range 0 1
+	default 0
+	help
+	  This option sets the default value for the 'checkreqprot' flag
+	  that determines whether SELinux checks the protection requested
+	  by the application or the protection that will be applied by the
+	  kernel (including any implied execute for read-implies-exec) for
+	  mmap and mprotect calls.  If this option is set to 0 (zero),
+	  SELinux will default to checking the protection that will be applied
+	  by the kernel.  If this option is set to 1 (one), SELinux will
+	  default to checking the protection requested by the application.
+	  The checkreqprot flag may be changed from the default via the
+	  'checkreqprot=' boot parameter.  It may also be changed at runtime
+	  via /selinux/checkreqprot if authorized by policy.
+
+	  If you are unsure how to answer this question, answer 0.
+
+config SECURITY_SELINUX_POLICYDB_VERSION_MAX
+	bool "NSA SELinux maximum supported policy format version"
+	depends on SECURITY_SELINUX
+	default n
+	help
+	  This option enables the maximum policy format version supported
+	  by SELinux to be set to a particular value.  This value is reported
+	  to userspace via /selinux/policyvers and used at policy load time.
+	  It can be adjusted downward to support legacy userland (init) that
+	  does not correctly handle kernels that support newer policy versions.
+
+	  Examples:
+	  For the Fedora Core 3 or 4 Linux distributions, enable this option
+	  and set the value via the next option. For Fedora Core 5 and later,
+	  do not enable this option.
+
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
+	int "NSA SELinux maximum supported policy format version value"
+	depends on SECURITY_SELINUX_POLICYDB_VERSION_MAX
+	range 15 23
+	default 19
+	help
+	  This option sets the value for the maximum policy format version
+	  supported by SELinux.
+
+	  Examples:
+	  For Fedora Core 3, use 18.
+	  For Fedora Core 4, use 19.
+
+	  If you are unsure how to answer this question, look for the
+	  policy format version supported by your policy toolchain, by
+	  running 'checkpolicy -V'. Or look at what policy you have
+	  installed under /etc/selinux/$SELINUXTYPE/policy, where
+	  SELINUXTYPE is defined in your /etc/selinux/config.
+
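
The *_VALUE options above become compile-time defaults that a matching
boot parameter can override at runtime.  The handlers follow the kernel's
standard __setup() pattern; an abbreviated sketch of the checkreqprot
case (the full handler lives in security/selinux/hooks.c):

	/* default comes from Kconfig; 'checkreqprot=' overrides it at boot */
	int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;

	static int __init checkreqprot_setup(char *str)
	{
		unsigned long checkreqprot;

		if (!kstrtoul(str, 0, &checkreqprot))
			selinux_checkreqprot = checkreqprot ? 1 : 0;
		return 1;
	}
	__setup("checkreqprot=", checkreqprot_setup);
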
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
new file mode 100644
index 0000000..ad5cd76
--- /dev/null
+++ b/security/selinux/Makefile
@@ -0,0 +1,25 @@
+#
+# Makefile for building the SELinux module as part of the kernel tree.
+#
+
+obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
+
+selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
+	     netnode.o netport.o exports.o \
+	     ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
+	     ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
+
+selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
+
+selinux-$(CONFIG_NETLABEL) += netlabel.o
+
+ccflags-y := -Isecurity/selinux -Isecurity/selinux/include
+
+$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
+
+quiet_cmd_flask = GEN     $(obj)/flask.h $(obj)/av_permissions.h
+      cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
+
+targets += flask.h av_permissions.h
+$(obj)/flask.h: $(src)/include/classmap.h FORCE
+	$(call if_changed,flask)
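
The if_changed rule above regenerates flask.h and av_permissions.h from
include/classmap.h whenever the class map changes.  The generated headers
have roughly this shape (the numeric values here are illustrative only;
the real ones follow the order of entries in classmap.h):

	/* flask.h: one numeric id per security class */
	#define SECCLASS_SECURITY		1
	#define SECCLASS_PROCESS		2

	/* av_permissions.h: one bit per permission within a class */
	#define PROCESS__FORK			0x00000001UL
	#define PROCESS__TRANSITION		0x00000002UL

These ids index secclass_map, which is how avc_dump_av() in avc.c below
turns a raw access vector back into permission names.
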
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
new file mode 100644
index 0000000..e60c79d
--- /dev/null
+++ b/security/selinux/avc.c
@@ -0,0 +1,1194 @@
+/*
+ * Implementation of the kernel access vector cache (AVC).
+ *
+ * Authors:  Stephen Smalley, <sds@epoch.ncsc.mil>
+ *	     James Morris <jmorris@redhat.com>
+ *
+ * Update:   KaiGai, Kohei <kaigai@ak.jp.nec.com>
+ *	Replaced the avc_lock spinlock by RCU.
+ *
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License version 2,
+ *	as published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/percpu.h>
+#include <linux/list.h>
+#include <net/sock.h>
+#include <linux/un.h>
+#include <net/af_unix.h>
+#include <linux/ip.h>
+#include <linux/audit.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include "avc.h"
+#include "avc_ss.h"
+#include "classmap.h"
+
+#define AVC_CACHE_SLOTS			512
+#define AVC_DEF_CACHE_THRESHOLD		512
+#define AVC_CACHE_RECLAIM		16
+
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+#define avc_cache_stats_incr(field)	this_cpu_inc(avc_cache_stats.field)
+#else
+#define avc_cache_stats_incr(field)	do {} while (0)
+#endif
+
+struct avc_entry {
+	u32			ssid;
+	u32			tsid;
+	u16			tclass;
+	struct av_decision	avd;
+	struct avc_xperms_node	*xp_node;
+};
+
+struct avc_node {
+	struct avc_entry	ae;
+	struct hlist_node	list; /* anchored in avc_cache->slots[i] */
+	struct rcu_head		rhead;
+};
+
+struct avc_xperms_decision_node {
+	struct extended_perms_decision xpd;
+	struct list_head xpd_list; /* list of extended_perms_decision */
+};
+
+struct avc_xperms_node {
+	struct extended_perms xp;
+	struct list_head xpd_head; /* list head of extended_perms_decision */
+};
+
+struct avc_cache {
+	struct hlist_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
+	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
+	atomic_t		lru_hint;	/* LRU hint for reclaim scan */
+	atomic_t		active_nodes;
+	u32			latest_notif;	/* latest revocation notification */
+};
+
+struct avc_callback_node {
+	int (*callback) (u32 event);
+	u32 events;
+	struct avc_callback_node *next;
+};
+
+/* Exported via selinuxfs */
+unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;
+
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
+#endif
+
+static struct avc_cache avc_cache;
+static struct avc_callback_node *avc_callbacks;
+static struct kmem_cache *avc_node_cachep;
+static struct kmem_cache *avc_xperms_data_cachep;
+static struct kmem_cache *avc_xperms_decision_cachep;
+static struct kmem_cache *avc_xperms_cachep;
+
+static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
+{
+	return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
+}
+
+/**
+ * avc_dump_av - Display an access vector in human-readable form.
+ * @tclass: target security class
+ * @av: access vector
+ */
+static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
+{
+	const char **perms;
+	int i, perm;
+
+	if (av == 0) {
+		audit_log_format(ab, " null");
+		return;
+	}
+
+	BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map));
+	perms = secclass_map[tclass-1].perms;
+
+	audit_log_format(ab, " {");
+	i = 0;
+	perm = 1;
+	while (i < (sizeof(av) * 8)) {
+		if ((perm & av) && perms[i]) {
+			audit_log_format(ab, " %s", perms[i]);
+			av &= ~perm;
+		}
+		i++;
+		perm <<= 1;
+	}
+
+	if (av)
+		audit_log_format(ab, " 0x%x", av);
+
+	audit_log_format(ab, " }");
+}
+
+/**
+ * avc_dump_query - Display a SID pair and a class in human-readable form.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ */
+static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass)
+{
+	int rc;
+	char *scontext;
+	u32 scontext_len;
+
+	rc = security_sid_to_context(ssid, &scontext, &scontext_len);
+	if (rc)
+		audit_log_format(ab, "ssid=%d", ssid);
+	else {
+		audit_log_format(ab, "scontext=%s", scontext);
+		kfree(scontext);
+	}
+
+	rc = security_sid_to_context(tsid, &scontext, &scontext_len);
+	if (rc)
+		audit_log_format(ab, " tsid=%d", tsid);
+	else {
+		audit_log_format(ab, " tcontext=%s", scontext);
+		kfree(scontext);
+	}
+
+	BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map));
+	audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
+}
+
+/**
+ * avc_init - Initialize the AVC.
+ *
+ * Initialize the access vector cache.
+ */
+void __init avc_init(void)
+{
+	int i;
+
+	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
+		INIT_HLIST_HEAD(&avc_cache.slots[i]);
+		spin_lock_init(&avc_cache.slots_lock[i]);
+	}
+	atomic_set(&avc_cache.active_nodes, 0);
+	atomic_set(&avc_cache.lru_hint, 0);
+
+	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
+					0, SLAB_PANIC, NULL);
+	avc_xperms_cachep = kmem_cache_create("avc_xperms_node",
+					sizeof(struct avc_xperms_node),
+					0, SLAB_PANIC, NULL);
+	avc_xperms_decision_cachep = kmem_cache_create(
+					"avc_xperms_decision_node",
+					sizeof(struct avc_xperms_decision_node),
+					0, SLAB_PANIC, NULL);
+	avc_xperms_data_cachep = kmem_cache_create("avc_xperms_data",
+					sizeof(struct extended_perms_data),
+					0, SLAB_PANIC, NULL);
+
+	audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
+}
+
+int avc_get_hash_stats(char *page)
+{
+	int i, chain_len, max_chain_len, slots_used;
+	struct avc_node *node;
+	struct hlist_head *head;
+
+	rcu_read_lock();
+
+	slots_used = 0;
+	max_chain_len = 0;
+	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
+		head = &avc_cache.slots[i];
+		if (!hlist_empty(head)) {
+			slots_used++;
+			chain_len = 0;
+			hlist_for_each_entry_rcu(node, head, list)
+				chain_len++;
+			if (chain_len > max_chain_len)
+				max_chain_len = chain_len;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
+			 "longest chain: %d\n",
+			 atomic_read(&avc_cache.active_nodes),
+			 slots_used, AVC_CACHE_SLOTS, max_chain_len);
+}
+
+/*
+ * Use a linked list for extended_perms_decision lookup because the list is
+ * always small, i.e. fewer than 5 entries, typically 1.
+ */
+static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver,
+					struct avc_xperms_node *xp_node)
+{
+	struct avc_xperms_decision_node *xpd_node;
+
+	list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) {
+		if (xpd_node->xpd.driver == driver)
+			return &xpd_node->xpd;
+	}
+	return NULL;
+}
+
+static inline unsigned int
+avc_xperms_has_perm(struct extended_perms_decision *xpd,
+					u8 perm, u8 which)
+{
+	unsigned int rc = 0;
+
+	if ((which == XPERMS_ALLOWED) &&
+			(xpd->used & XPERMS_ALLOWED))
+		rc = security_xperm_test(xpd->allowed->p, perm);
+	else if ((which == XPERMS_AUDITALLOW) &&
+			(xpd->used & XPERMS_AUDITALLOW))
+		rc = security_xperm_test(xpd->auditallow->p, perm);
+	else if ((which == XPERMS_DONTAUDIT) &&
+			(xpd->used & XPERMS_DONTAUDIT))
+		rc = security_xperm_test(xpd->dontaudit->p, perm);
+	return rc;
+}
+
+static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node,
+				u8 driver, u8 perm)
+{
+	struct extended_perms_decision *xpd;
+	security_xperm_set(xp_node->xp.drivers.p, driver);
+	xpd = avc_xperms_decision_lookup(driver, xp_node);
+	if (xpd && xpd->allowed)
+		security_xperm_set(xpd->allowed->p, perm);
+}
+
+static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node)
+{
+	struct extended_perms_decision *xpd;
+
+	xpd = &xpd_node->xpd;
+	if (xpd->allowed)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->allowed);
+	if (xpd->auditallow)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow);
+	if (xpd->dontaudit)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit);
+	kmem_cache_free(avc_xperms_decision_cachep, xpd_node);
+}
+
+static void avc_xperms_free(struct avc_xperms_node *xp_node)
+{
+	struct avc_xperms_decision_node *xpd_node, *tmp;
+
+	if (!xp_node)
+		return;
+
+	list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) {
+		list_del(&xpd_node->xpd_list);
+		avc_xperms_decision_free(xpd_node);
+	}
+	kmem_cache_free(avc_xperms_cachep, xp_node);
+}
+
+static void avc_copy_xperms_decision(struct extended_perms_decision *dest,
+					struct extended_perms_decision *src)
+{
+	dest->driver = src->driver;
+	dest->used = src->used;
+	if (dest->used & XPERMS_ALLOWED)
+		memcpy(dest->allowed->p, src->allowed->p,
+				sizeof(src->allowed->p));
+	if (dest->used & XPERMS_AUDITALLOW)
+		memcpy(dest->auditallow->p, src->auditallow->p,
+				sizeof(src->auditallow->p));
+	if (dest->used & XPERMS_DONTAUDIT)
+		memcpy(dest->dontaudit->p, src->dontaudit->p,
+				sizeof(src->dontaudit->p));
+}
+
+/*
+ * similar to avc_copy_xperms_decision, but only copy decision
+ * information relevant to this perm
+ */
+static inline void avc_quick_copy_xperms_decision(u8 perm,
+			struct extended_perms_decision *dest,
+			struct extended_perms_decision *src)
+{
+	/*
+	 * compute index of the u32 of the 256 bits (8 u32s) that contain this
+	 * command permission
+	 */
+	u8 i = perm >> 5;
+
+	dest->used = src->used;
+	if (dest->used & XPERMS_ALLOWED)
+		dest->allowed->p[i] = src->allowed->p[i];
+	if (dest->used & XPERMS_AUDITALLOW)
+		dest->auditallow->p[i] = src->auditallow->p[i];
+	if (dest->used & XPERMS_DONTAUDIT)
+		dest->dontaudit->p[i] = src->dontaudit->p[i];
+}
+
+static struct avc_xperms_decision_node
+		*avc_xperms_decision_alloc(u8 which)
+{
+	struct avc_xperms_decision_node *xpd_node;
+	struct extended_perms_decision *xpd;
+
+	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+				GFP_ATOMIC | __GFP_NOMEMALLOC);
+	if (!xpd_node)
+		return NULL;
+
+	xpd = &xpd_node->xpd;
+	if (which & XPERMS_ALLOWED) {
+		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->allowed)
+			goto error;
+	}
+	if (which & XPERMS_AUDITALLOW) {
+		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->auditallow)
+			goto error;
+	}
+	if (which & XPERMS_DONTAUDIT) {
+		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->dontaudit)
+			goto error;
+	}
+	return xpd_node;
+error:
+	avc_xperms_decision_free(xpd_node);
+	return NULL;
+}
+
+static int avc_add_xperms_decision(struct avc_node *node,
+			struct extended_perms_decision *src)
+{
+	struct avc_xperms_decision_node *dest_xpd;
+
+	node->ae.xp_node->xp.len++;
+	dest_xpd = avc_xperms_decision_alloc(src->used);
+	if (!dest_xpd)
+		return -ENOMEM;
+	avc_copy_xperms_decision(&dest_xpd->xpd, src);
+	list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
+	return 0;
+}
+
+static struct avc_xperms_node *avc_xperms_alloc(void)
+{
+	struct avc_xperms_node *xp_node;
+
+	xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+				GFP_ATOMIC|__GFP_NOMEMALLOC);
+	if (!xp_node)
+		return xp_node;
+	INIT_LIST_HEAD(&xp_node->xpd_head);
+	return xp_node;
+}
+
+static int avc_xperms_populate(struct avc_node *node,
+				struct avc_xperms_node *src)
+{
+	struct avc_xperms_node *dest;
+	struct avc_xperms_decision_node *dest_xpd;
+	struct avc_xperms_decision_node *src_xpd;
+
+	if (src->xp.len == 0)
+		return 0;
+	dest = avc_xperms_alloc();
+	if (!dest)
+		return -ENOMEM;
+
+	memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
+	dest->xp.len = src->xp.len;
+
+	/* for each source xpd allocate a destination xpd and copy */
+	list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) {
+		dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used);
+		if (!dest_xpd)
+			goto error;
+		avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd);
+		list_add(&dest_xpd->xpd_list, &dest->xpd_head);
+	}
+	node->ae.xp_node = dest;
+	return 0;
+error:
+	avc_xperms_free(dest);
+	return -ENOMEM;
+
+}
+
+static inline u32 avc_xperms_audit_required(u32 requested,
+					struct av_decision *avd,
+					struct extended_perms_decision *xpd,
+					u8 perm,
+					int result,
+					u32 *deniedp)
+{
+	u32 denied, audited;
+
+	denied = requested & ~avd->allowed;
+	if (unlikely(denied)) {
+		audited = denied & avd->auditdeny;
+		if (audited && xpd) {
+			if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT))
+				audited &= ~requested;
+		}
+	} else if (result) {
+		audited = denied = requested;
+	} else {
+		audited = requested & avd->auditallow;
+		if (audited && xpd) {
+			if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW))
+				audited &= ~requested;
+		}
+	}
+
+	*deniedp = denied;
+	return audited;
+}
+
+static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass,
+				u32 requested, struct av_decision *avd,
+				struct extended_perms_decision *xpd,
+				u8 perm, int result,
+				struct common_audit_data *ad)
+{
+	u32 audited, denied;
+
+	audited = avc_xperms_audit_required(
+			requested, avd, xpd, perm, result, &denied);
+	if (likely(!audited))
+		return 0;
+	return slow_avc_audit(ssid, tsid, tclass, requested,
+			audited, denied, result, ad, 0);
+}
+
+static void avc_node_free(struct rcu_head *rhead)
+{
+	struct avc_node *node = container_of(rhead, struct avc_node, rhead);
+	avc_xperms_free(node->ae.xp_node);
+	kmem_cache_free(avc_node_cachep, node);
+	avc_cache_stats_incr(frees);
+}
+
+static void avc_node_delete(struct avc_node *node)
+{
+	hlist_del_rcu(&node->list);
+	call_rcu(&node->rhead, avc_node_free);
+	atomic_dec(&avc_cache.active_nodes);
+}
+
+static void avc_node_kill(struct avc_node *node)
+{
+	avc_xperms_free(node->ae.xp_node);
+	kmem_cache_free(avc_node_cachep, node);
+	avc_cache_stats_incr(frees);
+	atomic_dec(&avc_cache.active_nodes);
+}
+
+static void avc_node_replace(struct avc_node *new, struct avc_node *old)
+{
+	hlist_replace_rcu(&old->list, &new->list);
+	call_rcu(&old->rhead, avc_node_free);
+	atomic_dec(&avc_cache.active_nodes);
+}
+
+static inline int avc_reclaim_node(void)
+{
+	struct avc_node *node;
+	int hvalue, try, ecx;
+	unsigned long flags;
+	struct hlist_head *head;
+	spinlock_t *lock;
+
+	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
+		hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+		head = &avc_cache.slots[hvalue];
+		lock = &avc_cache.slots_lock[hvalue];
+
+		if (!spin_trylock_irqsave(lock, flags))
+			continue;
+
+		rcu_read_lock();
+		hlist_for_each_entry(node, head, list) {
+			avc_node_delete(node);
+			avc_cache_stats_incr(reclaims);
+			ecx++;
+			if (ecx >= AVC_CACHE_RECLAIM) {
+				rcu_read_unlock();
+				spin_unlock_irqrestore(lock, flags);
+				goto out;
+			}
+		}
+		rcu_read_unlock();
+		spin_unlock_irqrestore(lock, flags);
+	}
+out:
+	return ecx;
+}
+
+static struct avc_node *avc_alloc_node(void)
+{
+	struct avc_node *node;
+
+	node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
+	if (!node)
+		goto out;
+
+	INIT_HLIST_NODE(&node->list);
+	avc_cache_stats_incr(allocations);
+
+	if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
+		avc_reclaim_node();
+
+out:
+	return node;
+}
+
+static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
+{
+	node->ae.ssid = ssid;
+	node->ae.tsid = tsid;
+	node->ae.tclass = tclass;
+	memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
+}
+
+static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
+{
+	struct avc_node *node, *ret = NULL;
+	int hvalue;
+	struct hlist_head *head;
+
+	hvalue = avc_hash(ssid, tsid, tclass);
+	head = &avc_cache.slots[hvalue];
+	hlist_for_each_entry_rcu(node, head, list) {
+		if (ssid == node->ae.ssid &&
+		    tclass == node->ae.tclass &&
+		    tsid == node->ae.tsid) {
+			ret = node;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * avc_lookup - Look up an AVC entry.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ *
+ * Look up an AVC entry that is valid for the
+ * (@ssid, @tsid), interpreting the permissions
+ * based on @tclass.  If a valid AVC entry exists,
+ * then this function returns the avc_node.
+ * Otherwise, this function returns NULL.
+ */
+static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
+{
+	struct avc_node *node;
+
+	avc_cache_stats_incr(lookups);
+	node = avc_search_node(ssid, tsid, tclass);
+
+	if (node)
+		return node;
+
+	avc_cache_stats_incr(misses);
+	return NULL;
+}
+
+static int avc_latest_notif_update(int seqno, int is_insert)
+{
+	int ret = 0;
+	static DEFINE_SPINLOCK(notif_lock);
+	unsigned long flag;
+
+	spin_lock_irqsave(&notif_lock, flag);
+	if (is_insert) {
+		if (seqno < avc_cache.latest_notif) {
+			printk(KERN_WARNING "SELinux: avc:  seqno %d < latest_notif %d\n",
+			       seqno, avc_cache.latest_notif);
+			ret = -EAGAIN;
+		}
+	} else {
+		if (seqno > avc_cache.latest_notif)
+			avc_cache.latest_notif = seqno;
+	}
+	spin_unlock_irqrestore(&notif_lock, flag);
+
+	return ret;
+}
+
+/**
+ * avc_insert - Insert an AVC entry.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @avd: resulting av decision
+ * @xp_node: resulting extended permissions
+ *
+ * Insert an AVC entry for the SID pair
+ * (@ssid, @tsid) and class @tclass.
+ * The access vectors and the sequence number are
+ * normally provided by the security server in
+ * response to a security_compute_av() call.  If the
+ * sequence number @avd->seqno is not less than the latest
+ * revocation notification, then the function copies
+ * the access vectors into a cache entry and returns
+ * the inserted avc_node.  Otherwise, this function returns NULL.
+ */
+static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
+				struct av_decision *avd,
+				struct avc_xperms_node *xp_node)
+{
+	struct avc_node *pos, *node = NULL;
+	int hvalue;
+	unsigned long flag;
+
+	if (avc_latest_notif_update(avd->seqno, 1))
+		goto out;
+
+	node = avc_alloc_node();
+	if (node) {
+		struct hlist_head *head;
+		spinlock_t *lock;
+		int rc = 0;
+
+		hvalue = avc_hash(ssid, tsid, tclass);
+		avc_node_populate(node, ssid, tsid, tclass, avd);
+		rc = avc_xperms_populate(node, xp_node);
+		if (rc) {
+			kmem_cache_free(avc_node_cachep, node);
+			return NULL;
+		}
+		head = &avc_cache.slots[hvalue];
+		lock = &avc_cache.slots_lock[hvalue];
+
+		spin_lock_irqsave(lock, flag);
+		hlist_for_each_entry(pos, head, list) {
+			if (pos->ae.ssid == ssid &&
+			    pos->ae.tsid == tsid &&
+			    pos->ae.tclass == tclass) {
+				avc_node_replace(node, pos);
+				goto found;
+			}
+		}
+		hlist_add_head_rcu(&node->list, head);
+found:
+		spin_unlock_irqrestore(lock, flag);
+	}
+out:
+	return node;
+}
+
+/**
+ * avc_audit_pre_callback - add SELinux-specific information to an audit
+ * record; called by the generic audit code
+ * @ab: the audit buffer
+ * @a: audit_data
+ */
+static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
+{
+	struct common_audit_data *ad = a;
+	audit_log_format(ab, "avc:  %s ",
+			 ad->selinux_audit_data->denied ? "denied" : "granted");
+	avc_dump_av(ab, ad->selinux_audit_data->tclass,
+			ad->selinux_audit_data->audited);
+	audit_log_format(ab, " for ");
+}
+
+/**
+ * avc_audit_post_callback - add SELinux-specific information to an audit
+ * record; called by the generic audit code
+ * @ab: the audit buffer
+ * @a: audit_data
+ */
+static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
+{
+	struct common_audit_data *ad = a;
+	audit_log_format(ab, " ");
+	avc_dump_query(ab, ad->selinux_audit_data->ssid,
+			   ad->selinux_audit_data->tsid,
+			   ad->selinux_audit_data->tclass);
+	if (ad->selinux_audit_data->denied) {
+		audit_log_format(ab, " permissive=%u",
+				 ad->selinux_audit_data->result ? 0 : 1);
+	}
+}
+
+/* This is the slow part of avc audit with a big stack footprint */
+noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
+		u32 requested, u32 audited, u32 denied, int result,
+		struct common_audit_data *a,
+		unsigned flags)
+{
+	struct common_audit_data stack_data;
+	struct selinux_audit_data sad;
+
+	if (!a) {
+		a = &stack_data;
+		a->type = LSM_AUDIT_DATA_NONE;
+	}
+
+	/*
+	 * When in an RCU walk, do the audit on the RCU retry.  This is because
+	 * the collection of the dname in an inode audit message is not RCU
+	 * safe.  Note this may drop some audits when the situation changes
+	 * during retry. However this is logically just as if the operation
+	 * happened a little later.
+	 */
+	if ((a->type == LSM_AUDIT_DATA_INODE) &&
+	    (flags & MAY_NOT_BLOCK))
+		return -ECHILD;
+
+	sad.tclass = tclass;
+	sad.requested = requested;
+	sad.ssid = ssid;
+	sad.tsid = tsid;
+	sad.audited = audited;
+	sad.denied = denied;
+	sad.result = result;
+
+	a->selinux_audit_data = &sad;
+
+	common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
+	return 0;
+}
+
+/**
+ * avc_add_callback - Register a callback for security events.
+ * @callback: callback function
+ * @events: security events
+ *
+ * Register a callback function for events in the set @events.
+ * Returns %0 on success or -%ENOMEM if insufficient memory
+ * exists to add the callback.
+ */
+int __init avc_add_callback(int (*callback)(u32 event), u32 events)
+{
+	struct avc_callback_node *c;
+	int rc = 0;
+
+	c = kmalloc(sizeof(*c), GFP_KERNEL);
+	if (!c) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	c->callback = callback;
+	c->events = events;
+	c->next = avc_callbacks;
+	avc_callbacks = c;
+out:
+	return rc;
+}
+
+/**
+ * avc_update_node - Update an AVC entry
+ * @event: updating event
+ * @perms: permission mask bits
+ * @driver, @xperm: extended permission being updated, if any
+ * @ssid, @tsid, @tclass: identifier of an AVC entry
+ * @seqno: sequence number when the decision was made
+ * @xpd: extended_perms_decision to be added to the node
+ * @flags: the AVC_* flags
+ *
+ * If a valid AVC entry doesn't exist, this function returns -ENOENT.
+ * If the internal node allocation fails, this function returns -ENOMEM.
+ * Otherwise, this function updates the AVC entry.  The original AVC entry
+ * will be released later by RCU.
+ */
+static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
+			u32 tsid, u16 tclass, u32 seqno,
+			struct extended_perms_decision *xpd,
+			u32 flags)
+{
+	int hvalue, rc = 0;
+	unsigned long flag;
+	struct avc_node *pos, *node, *orig = NULL;
+	struct hlist_head *head;
+	spinlock_t *lock;
+
+	node = avc_alloc_node();
+	if (!node) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Lock the target slot */
+	hvalue = avc_hash(ssid, tsid, tclass);
+
+	head = &avc_cache.slots[hvalue];
+	lock = &avc_cache.slots_lock[hvalue];
+
+	spin_lock_irqsave(lock, flag);
+
+	hlist_for_each_entry(pos, head, list) {
+		if (ssid == pos->ae.ssid &&
+		    tsid == pos->ae.tsid &&
+		    tclass == pos->ae.tclass &&
+		    seqno == pos->ae.avd.seqno){
+			orig = pos;
+			break;
+		}
+	}
+
+	if (!orig) {
+		rc = -ENOENT;
+		avc_node_kill(node);
+		goto out_unlock;
+	}
+
+	/*
+	 * Copy and replace original node.
+	 */
+
+	avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
+
+	if (orig->ae.xp_node) {
+		rc = avc_xperms_populate(node, orig->ae.xp_node);
+		if (rc) {
+			kmem_cache_free(avc_node_cachep, node);
+			goto out_unlock;
+		}
+	}
+
+	switch (event) {
+	case AVC_CALLBACK_GRANT:
+		node->ae.avd.allowed |= perms;
+		if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
+			avc_xperms_allow_perm(node->ae.xp_node, driver, xperm);
+		break;
+	case AVC_CALLBACK_TRY_REVOKE:
+	case AVC_CALLBACK_REVOKE:
+		node->ae.avd.allowed &= ~perms;
+		break;
+	case AVC_CALLBACK_AUDITALLOW_ENABLE:
+		node->ae.avd.auditallow |= perms;
+		break;
+	case AVC_CALLBACK_AUDITALLOW_DISABLE:
+		node->ae.avd.auditallow &= ~perms;
+		break;
+	case AVC_CALLBACK_AUDITDENY_ENABLE:
+		node->ae.avd.auditdeny |= perms;
+		break;
+	case AVC_CALLBACK_AUDITDENY_DISABLE:
+		node->ae.avd.auditdeny &= ~perms;
+		break;
+	case AVC_CALLBACK_ADD_XPERMS:
+		avc_add_xperms_decision(node, xpd);
+		break;
+	}
+	avc_node_replace(node, orig);
+out_unlock:
+	spin_unlock_irqrestore(lock, flag);
+out:
+	return rc;
+}
+
+/**
+ * avc_flush - Flush the cache
+ */
+static void avc_flush(void)
+{
+	struct hlist_head *head;
+	struct avc_node *node;
+	spinlock_t *lock;
+	unsigned long flag;
+	int i;
+
+	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
+		head = &avc_cache.slots[i];
+		lock = &avc_cache.slots_lock[i];
+
+		spin_lock_irqsave(lock, flag);
+		/*
+		 * With preemptible RCU, the outer spinlock does not
+		 * prevent RCU grace periods from ending.
+		 */
+		rcu_read_lock();
+		hlist_for_each_entry(node, head, list)
+			avc_node_delete(node);
+		rcu_read_unlock();
+		spin_unlock_irqrestore(lock, flag);
+	}
+}
+
+/**
+ * avc_ss_reset - Flush the cache and revalidate migrated permissions.
+ * @seqno: policy sequence number
+ */
+int avc_ss_reset(u32 seqno)
+{
+	struct avc_callback_node *c;
+	int rc = 0, tmprc;
+
+	avc_flush();
+
+	for (c = avc_callbacks; c; c = c->next) {
+		if (c->events & AVC_CALLBACK_RESET) {
+			tmprc = c->callback(AVC_CALLBACK_RESET);
+			/* save the first error encountered for the return
+			   value and continue processing the callbacks */
+			if (!rc)
+				rc = tmprc;
+		}
+	}
+
+	avc_latest_notif_update(seqno, 0);
+	return rc;
+}
+
+/*
+ * Slow-path helper function for avc_has_perm_noaudit,
+ * when the avc_node lookup fails. We get called with
+ * the RCU read lock held, and need to return with it
+ * still held, but drop it across the security computation.
+ *
+ * Don't inline this, since it's the slow-path and just
+ * results in a bigger stack frame.
+ */
+static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
+			 u16 tclass, struct av_decision *avd,
+			 struct avc_xperms_node *xp_node)
+{
+	rcu_read_unlock();
+	INIT_LIST_HEAD(&xp_node->xpd_head);
+	security_compute_av(ssid, tsid, tclass, avd, &xp_node->xp);
+	rcu_read_lock();
+	return avc_insert(ssid, tsid, tclass, avd, xp_node);
+}
+
+static noinline int avc_denied(u32 ssid, u32 tsid,
+				u16 tclass, u32 requested,
+				u8 driver, u8 xperm, unsigned flags,
+				struct av_decision *avd)
+{
+	if (flags & AVC_STRICT)
+		return -EACCES;
+
+	if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
+		return -EACCES;
+
+	avc_update_node(AVC_CALLBACK_GRANT, requested, driver, xperm, ssid,
+				tsid, tclass, avd->seqno, NULL, flags);
+	return 0;
+}
+
+/*
+ * The avc extended permissions logic adds an additional 256 bits of
+ * permissions to an avc node when extended permissions for that node are
+ * specified in the avtab. If an additional 256 permissions are not adequate,
+ * as is the case with ioctls, then multiple sets may be chained together and the
+ * driver field is used to specify which set contains the permission.
+ */
+int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+			u8 driver, u8 xperm, struct common_audit_data *ad)
+{
+	struct avc_node *node;
+	struct av_decision avd;
+	u32 denied;
+	struct extended_perms_decision local_xpd;
+	struct extended_perms_decision *xpd = NULL;
+	struct extended_perms_data allowed;
+	struct extended_perms_data auditallow;
+	struct extended_perms_data dontaudit;
+	struct avc_xperms_node local_xp_node;
+	struct avc_xperms_node *xp_node;
+	int rc = 0, rc2;
+
+	xp_node = &local_xp_node;
+	BUG_ON(!requested);
+
+	rcu_read_lock();
+
+	node = avc_lookup(ssid, tsid, tclass);
+	if (unlikely(!node)) {
+		node = avc_compute_av(ssid, tsid, tclass, &avd, xp_node);
+	} else {
+		memcpy(&avd, &node->ae.avd, sizeof(avd));
+		xp_node = node->ae.xp_node;
+	}
+	/* if extended permissions are not defined, only consider av_decision */
+	if (!xp_node || !xp_node->xp.len)
+		goto decision;
+
+	local_xpd.allowed = &allowed;
+	local_xpd.auditallow = &auditallow;
+	local_xpd.dontaudit = &dontaudit;
+
+	xpd = avc_xperms_decision_lookup(driver, xp_node);
+	if (unlikely(!xpd)) {
+		/*
+		 * Compute the extended_perms_decision only if the driver
+		 * is flagged
+		 */
+		if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
+			avd.allowed &= ~requested;
+			goto decision;
+		}
+		rcu_read_unlock();
+		security_compute_xperms_decision(ssid, tsid, tclass, driver,
+						&local_xpd);
+		rcu_read_lock();
+		avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver, xperm,
+				ssid, tsid, tclass, avd.seqno, &local_xpd, 0);
+	} else {
+		avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
+	}
+	xpd = &local_xpd;
+
+	if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED))
+		avd.allowed &= ~requested;
+
+decision:
+	denied = requested & ~(avd.allowed);
+	if (unlikely(denied))
+		rc = avc_denied(ssid, tsid, tclass, requested, driver, xperm,
+				AVC_EXTENDED_PERMS, &avd);
+
+	rcu_read_unlock();
+
+	rc2 = avc_xperms_audit(ssid, tsid, tclass, requested,
+			&avd, xpd, xperm, rc, ad);
+	if (rc2)
+		return rc2;
+	return rc;
+}
+
+/**
+ * avc_has_perm_noaudit - Check permissions but perform no auditing.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @requested: requested permissions, interpreted based on @tclass
+ * @flags:  AVC_STRICT or 0
+ * @avd: access vector decisions
+ *
+ * Check the AVC to determine whether the @requested permissions are granted
+ * for the SID pair (@ssid, @tsid), interpreting the permissions
+ * based on @tclass, and call the security server on a cache miss to obtain
+ * a new decision and add it to the cache.  Return a copy of the decisions
+ * in @avd.  Return %0 if all @requested permissions are granted,
+ * -%EACCES if any permissions are denied, or another -errno upon
+ * other errors.  This function is typically called by avc_has_perm(),
+ * but may also be called directly to separate permission checking from
+ * auditing, e.g. in cases where a lock must be held for the check but
+ * should be released for the auditing.
+ */
+inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
+			 u16 tclass, u32 requested,
+			 unsigned flags,
+			 struct av_decision *avd)
+{
+	struct avc_node *node;
+	struct avc_xperms_node xp_node;
+	int rc = 0;
+	u32 denied;
+
+	BUG_ON(!requested);
+
+	rcu_read_lock();
+
+	node = avc_lookup(ssid, tsid, tclass);
+	if (unlikely(!node))
+		node = avc_compute_av(ssid, tsid, tclass, avd, &xp_node);
+	else
+		memcpy(avd, &node->ae.avd, sizeof(*avd));
+
+	denied = requested & ~(avd->allowed);
+	if (unlikely(denied))
+		rc = avc_denied(ssid, tsid, tclass, requested, 0, 0, flags, avd);
+
+	rcu_read_unlock();
+	return rc;
+}
+
+/**
+ * avc_has_perm - Check permissions and perform any appropriate auditing.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @requested: requested permissions, interpreted based on @tclass
+ * @auditdata: auxiliary audit data
+ *
+ * Check the AVC to determine whether the @requested permissions are granted
+ * for the SID pair (@ssid, @tsid), interpreting the permissions
+ * based on @tclass, and call the security server on a cache miss to obtain
+ * a new decision and add it to the cache.  Audit the granting or denial of
+ * permissions in accordance with the policy.  Return %0 if all @requested
+ * permissions are granted, -%EACCES if any permissions are denied, or
+ * another -errno upon other errors.
+ */
+int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
+		 u32 requested, struct common_audit_data *auditdata)
+{
+	struct av_decision avd;
+	int rc, rc2;
+
+	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
+
+	rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 0);
+	if (rc2)
+		return rc2;
+	return rc;
+}
+
+int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
+		       u32 requested, struct common_audit_data *auditdata,
+		       int flags)
+{
+	struct av_decision avd;
+	int rc, rc2;
+
+	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
+
+	rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc,
+			auditdata, flags);
+	if (rc2)
+		return rc2;
+	return rc;
+}
+
+u32 avc_policy_seqno(void)
+{
+	return avc_cache.latest_notif;
+}
+
+void avc_disable(void)
+{
+	/*
+	 * If you are looking at this because you have realized that we are
+	 * not destroying the avc_node_cachep it might be easy to fix, but
+	 * I don't know the memory barrier semantics well enough to know.  It's
+	 * possible that some other task dereferenced security_ops when
+	 * it still pointed to selinux operations.  If that is the case it's
+	 * possible that it is about to use the avc and is about to need the
+	 * avc_node_cachep.  I know I could wrap the security.c security_ops call
+	 * in an rcu_lock, but seriously, it's not worth it.  Instead I just flush
+	 * the cache and get that memory back.
+	 */
+	if (avc_node_cachep) {
+		avc_flush();
+		/* kmem_cache_destroy(avc_node_cachep); */
+	}
+}
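
A note on the extended-permission arithmetic used throughout this file:
the 256 xperm bits are stored as eight u32 words (struct
extended_perms_data), so for an 8-bit ioctl command number x the word
index is x >> 5 and the bit within that word is x & 0x1f, the same
indexing avc_quick_copy_xperms_decision() uses above.  A self-contained
sketch of the test/set helpers (the real macros live in
security/selinux/include/security.h):

	/* sketch: test and set one of the 256 extended-permission bits */
	static inline int xperm_test(const u32 p[8], u8 x)
	{
		return 1 & (p[x >> 5] >> (x & 0x1f));
	}

	static inline void xperm_set(u32 p[8], u8 x)
	{
		p[x >> 5] |= 1U << (x & 0x1f);
	}

For example, ioctl command 0x42 lands in word 2 (0x42 >> 5) at bit 2
(0x42 & 0x1f).
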
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
new file mode 100644
index 0000000..e75dd94
--- /dev/null
+++ b/security/selinux/exports.c
@@ -0,0 +1,23 @@
+/*
+ * SELinux services exported to the rest of the kernel.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/selinux.h>
+
+#include "security.h"
+
+bool selinux_is_enabled(void)
+{
+	return selinux_enabled;
+}
+EXPORT_SYMBOL_GPL(selinux_is_enabled);
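
selinux_enabled itself is defined in hooks.c below and reflects the
selinux= boot parameter; this wrapper lets built-in code outside
security/selinux test the state without reaching into SELinux internals.
A hypothetical caller (illustrative only):

	#include <linux/kernel.h>
	#include <linux/selinux.h>

	/* hypothetical example: skip SELinux-only work when it is disabled */
	static void demo_report(void)
	{
		if (selinux_is_enabled())
			pr_info("SELinux is built in and enabled\n");
	}
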
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
new file mode 100644
index 0000000..4b56c3b
--- /dev/null
+++ b/security/selinux/hooks.c
@@ -0,0 +1,6233 @@
+/*
+ *  NSA Security-Enhanced Linux (SELinux) security module
+ *
+ *  This file contains the SELinux hook function implementations.
+ *
+ *  Authors:  Stephen Smalley, <sds@epoch.ncsc.mil>
+ *	      Chris Vance, <cvance@nai.com>
+ *	      Wayne Salamon, <wsalamon@nai.com>
+ *	      James Morris <jmorris@redhat.com>
+ *
+ *  Copyright (C) 2001,2002 Networks Associates Technology, Inc.
+ *  Copyright (C) 2003-2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *					   Eric Paris <eparis@redhat.com>
+ *  Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+ *			    <dgoeddel@trustedcs.com>
+ *  Copyright (C) 2006, 2007, 2009 Hewlett-Packard Development Company, L.P.
+ *	Paul Moore <paul@paul-moore.com>
+ *  Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
+ *		       Yuichi Nakamura <ynakam@hitachisoft.jp>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License version 2,
+ *	as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kd.h>
+#include <linux/kernel.h>
+#include <linux/tracehook.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/lsm_hooks.h>
+#include <linux/xattr.h>
+#include <linux/capability.h>
+#include <linux/unistd.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/swap.h>
+#include <linux/spinlock.h>
+#include <linux/syscalls.h>
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/tty.h>
+#include <net/icmp.h>
+#include <net/ip.h>		/* for local_port_range[] */
+#include <net/tcp.h>		/* struct or_callable used in sock_rcv_skb */
+#include <net/inet_connection_sock.h>
+#include <net/net_namespace.h>
+#include <net/netlabel.h>
+#include <linux/uaccess.h>
+#include <asm/ioctls.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>	/* for network interface checks */
+#include <net/netlink.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/dccp.h>
+#include <linux/quota.h>
+#include <linux/un.h>		/* for Unix socket types */
+#include <net/af_unix.h>	/* for Unix socket types */
+#include <linux/parser.h>
+#include <linux/nfs_mount.h>
+#include <net/ipv6.h>
+#include <linux/hugetlb.h>
+#include <linux/personality.h>
+#include <linux/audit.h>
+#include <linux/string.h>
+#include <linux/selinux.h>
+#include <linux/mutex.h>
+#include <linux/posix-timers.h>
+#include <linux/syslog.h>
+#include <linux/user_namespace.h>
+#include <linux/export.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+
+#include "avc.h"
+#include "objsec.h"
+#include "netif.h"
+#include "netnode.h"
+#include "netport.h"
+#include "xfrm.h"
+#include "netlabel.h"
+#include "audit.h"
+#include "avc_ss.h"
+
+/* SECMARK reference count */
+static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+int selinux_enforcing;
+
+static int __init enforcing_setup(char *str)
+{
+	unsigned long enforcing;
+	if (!kstrtoul(str, 0, &enforcing))
+		selinux_enforcing = enforcing ? 1 : 0;
+	return 1;
+}
+__setup("enforcing=", enforcing_setup);
+#endif
+
+#ifdef CONFIG_SECURITY_SELINUX_BOOTPARAM
+int selinux_enabled = CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE;
+
+static int __init selinux_enabled_setup(char *str)
+{
+	unsigned long enabled;
+	if (!kstrtoul(str, 0, &enabled))
+		selinux_enabled = enabled ? 1 : 0;
+	return 1;
+}
+__setup("selinux=", selinux_enabled_setup);
+#else
+int selinux_enabled = 1;
+#endif
+
+static struct kmem_cache *sel_inode_cache;
+static struct kmem_cache *file_security_cache;
+
+/**
+ * selinux_secmark_enabled - Check to see if SECMARK is currently enabled
+ *
+ * Description:
+ * This function checks the SECMARK reference counter to see if any SECMARK
+ * targets are currently configured; if the reference counter is greater than
+ * zero, SECMARK is considered to be enabled.  Returns true (1) if SECMARK is
+ * enabled, false (0) if SECMARK is disabled.  If the always_check_network
+ * policy capability is enabled, SECMARK is always considered enabled.
+ *
+ */
+static int selinux_secmark_enabled(void)
+{
+	return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount));
+}
+
+/**
+ * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled
+ *
+ * Description:
+ * This function checks if NetLabel or labeled IPSEC is enabled.  Returns true
+ * (1) if either is enabled or false (0) if neither is enabled.  If the
+ * always_check_network policy capability is enabled, peer labeling
+ * is always considered enabled.
+ *
+ */
+static int selinux_peerlbl_enabled(void)
+{
+	return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled());
+}
+
+static int selinux_netcache_avc_callback(u32 event)
+{
+	if (event == AVC_CALLBACK_RESET) {
+		sel_netif_flush();
+		sel_netnode_flush();
+		sel_netport_flush();
+		synchronize_net();
+	}
+	return 0;
+}
+
+/*
+ * initialise the security for the init task
+ */
+static void cred_init_security(void)
+{
+	struct cred *cred = (struct cred *) current->real_cred;
+	struct task_security_struct *tsec;
+
+	tsec = kzalloc(sizeof(struct task_security_struct), GFP_KERNEL);
+	if (!tsec)
+		panic("SELinux:  Failed to initialize initial task.\n");
+
+	tsec->osid = tsec->sid = SECINITSID_KERNEL;
+	cred->security = tsec;
+}
+
+/*
+ * get the security ID of a set of credentials
+ */
+static inline u32 cred_sid(const struct cred *cred)
+{
+	const struct task_security_struct *tsec;
+
+	tsec = cred->security;
+	return tsec->sid;
+}
+
+/*
+ * get the objective security ID of a task
+ */
+static inline u32 task_sid(const struct task_struct *task)
+{
+	u32 sid;
+
+	rcu_read_lock();
+	sid = cred_sid(__task_cred(task));
+	rcu_read_unlock();
+	return sid;
+}
+
+/*
+ * get the subjective security ID of the current task
+ */
+static inline u32 current_sid(void)
+{
+	const struct task_security_struct *tsec = current_security();
+
+	return tsec->sid;
+}
+
+/* Allocate and free functions for each kind of security blob. */
+
+static int inode_alloc_security(struct inode *inode)
+{
+	struct inode_security_struct *isec;
+	u32 sid = current_sid();
+
+	isec = kmem_cache_zalloc(sel_inode_cache, GFP_NOFS);
+	if (!isec)
+		return -ENOMEM;
+
+	mutex_init(&isec->lock);
+	INIT_LIST_HEAD(&isec->list);
+	isec->inode = inode;
+	isec->sid = SECINITSID_UNLABELED;
+	isec->sclass = SECCLASS_FILE;
+	isec->task_sid = sid;
+	inode->i_security = isec;
+
+	return 0;
+}
+
+static void inode_free_rcu(struct rcu_head *head)
+{
+	struct inode_security_struct *isec;
+
+	isec = container_of(head, struct inode_security_struct, rcu);
+	kmem_cache_free(sel_inode_cache, isec);
+}
+
+static void inode_free_security(struct inode *inode)
+{
+	struct inode_security_struct *isec = inode->i_security;
+	struct superblock_security_struct *sbsec = inode->i_sb->s_security;
+
+	/*
+	 * As not all inode security structures are in a list, we check for
+	 * empty list outside of the lock to make sure that we won't waste
+	 * time taking a lock doing nothing.
+	 *
+	 * The list_del_init() function can be safely called more than once.
+	 * It should not be possible for this function to be called with
+	 * concurrent list_add(), but for better safety against future changes
+	 * in the code, we use list_empty_careful() here.
+	 */
+	if (!list_empty_careful(&isec->list)) {
+		spin_lock(&sbsec->isec_lock);
+		list_del_init(&isec->list);
+		spin_unlock(&sbsec->isec_lock);
+	}
+
+	/*
+	 * The inode may still be referenced in a path walk and
+	 * a call to selinux_inode_permission() can be made
+	 * after inode_free_security() is called. Ideally, the VFS
+	 * wouldn't do this, but fixing that is a much harder
+	 * job. For now, simply free the i_security via RCU, and
+	 * leave the current inode->i_security pointer intact.
+	 * The inode will be freed after the RCU grace period too.
+	 */
+	call_rcu(&isec->rcu, inode_free_rcu);
+}
+
+static int file_alloc_security(struct file *file)
+{
+	struct file_security_struct *fsec;
+	u32 sid = current_sid();
+
+	fsec = kmem_cache_zalloc(file_security_cache, GFP_KERNEL);
+	if (!fsec)
+		return -ENOMEM;
+
+	fsec->sid = sid;
+	fsec->fown_sid = sid;
+	file->f_security = fsec;
+
+	return 0;
+}
+
+static void file_free_security(struct file *file)
+{
+	struct file_security_struct *fsec = file->f_security;
+	file->f_security = NULL;
+	kmem_cache_free(file_security_cache, fsec);
+}
+
+static int superblock_alloc_security(struct super_block *sb)
+{
+	struct superblock_security_struct *sbsec;
+
+	sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL);
+	if (!sbsec)
+		return -ENOMEM;
+
+	mutex_init(&sbsec->lock);
+	INIT_LIST_HEAD(&sbsec->isec_head);
+	spin_lock_init(&sbsec->isec_lock);
+	sbsec->sb = sb;
+	sbsec->sid = SECINITSID_UNLABELED;
+	sbsec->def_sid = SECINITSID_FILE;
+	sbsec->mntpoint_sid = SECINITSID_UNLABELED;
+	sb->s_security = sbsec;
+
+	return 0;
+}
+
+static void superblock_free_security(struct super_block *sb)
+{
+	struct superblock_security_struct *sbsec = sb->s_security;
+	sb->s_security = NULL;
+	kfree(sbsec);
+}
+
+/* The file system's label must be initialized prior to use. */
+
+static const char *labeling_behaviors[7] = {
+	"uses xattr",
+	"uses transition SIDs",
+	"uses task SIDs",
+	"uses genfs_contexts",
+	"not configured for labeling",
+	"uses mountpoint labeling",
+	"uses native labeling",
+};
+
+static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
+
+static inline int inode_doinit(struct inode *inode)
+{
+	return inode_doinit_with_dentry(inode, NULL);
+}
+
+enum {
+	Opt_error = -1,
+	Opt_context = 1,
+	Opt_fscontext = 2,
+	Opt_defcontext = 3,
+	Opt_rootcontext = 4,
+	Opt_labelsupport = 5,
+	Opt_nextmntopt = 6,
+};
+
+#define NUM_SEL_MNT_OPTS	(Opt_nextmntopt - 1)
+
+static const match_table_t tokens = {
+	{Opt_context, CONTEXT_STR "%s"},
+	{Opt_fscontext, FSCONTEXT_STR "%s"},
+	{Opt_defcontext, DEFCONTEXT_STR "%s"},
+	{Opt_rootcontext, ROOTCONTEXT_STR "%s"},
+	{Opt_labelsupport, LABELSUPP_STR},
+	{Opt_error, NULL},
+};
+
+#define SEL_MOUNT_FAIL_MSG "SELinux:  duplicate or incompatible mount options\n"
+
+static int may_context_mount_sb_relabel(u32 sid,
+			struct superblock_security_struct *sbsec,
+			const struct cred *cred)
+{
+	const struct task_security_struct *tsec = cred->security;
+	int rc;
+
+	rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
+			  FILESYSTEM__RELABELFROM, NULL);
+	if (rc)
+		return rc;
+
+	rc = avc_has_perm(tsec->sid, sid, SECCLASS_FILESYSTEM,
+			  FILESYSTEM__RELABELTO, NULL);
+	return rc;
+}
+
+static int may_context_mount_inode_relabel(u32 sid,
+			struct superblock_security_struct *sbsec,
+			const struct cred *cred)
+{
+	const struct task_security_struct *tsec = cred->security;
+	int rc;
+	rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
+			  FILESYSTEM__RELABELFROM, NULL);
+	if (rc)
+		return rc;
+
+	rc = avc_has_perm(sid, sbsec->sid, SECCLASS_FILESYSTEM,
+			  FILESYSTEM__ASSOCIATE, NULL);
+	return rc;
+}
+
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+	struct superblock_security_struct *sbsec = sb->s_security;
+
+	return sbsec->behavior == SECURITY_FS_USE_XATTR ||
+		sbsec->behavior == SECURITY_FS_USE_TRANS ||
+		sbsec->behavior == SECURITY_FS_USE_TASK ||
+		sbsec->behavior == SECURITY_FS_USE_NATIVE ||
+		/* Special handling. Genfs but also in-core setxattr handler */
+		!strcmp(sb->s_type->name, "sysfs") ||
+		!strcmp(sb->s_type->name, "pstore") ||
+		!strcmp(sb->s_type->name, "debugfs") ||
+		!strcmp(sb->s_type->name, "rootfs");
+}
+
+static int sb_finish_set_opts(struct super_block *sb)
+{
+	struct superblock_security_struct *sbsec = sb->s_security;
+	struct dentry *root = sb->s_root;
+	struct inode *root_inode = d_backing_inode(root);
+	int rc = 0;
+
+	if (sbsec->behavior == SECURITY_FS_USE_XATTR) {
+		/* Make sure that the xattr handler exists and that no
+		   error other than -ENODATA is returned by getxattr on
+		   the root directory.  -ENODATA is ok, as this may be
+		   the first boot of the SELinux kernel before we have
+		   assigned xattr values to the filesystem. */
+		if (!root_inode->i_op->getxattr) {
+			printk(KERN_WARNING "SELinux: (dev %s, type %s) has no "
+			       "xattr support\n", sb->s_id, sb->s_type->name);
+			rc = -EOPNOTSUPP;
+			goto out;
+		}
+		rc = root_inode->i_op->getxattr(root, XATTR_NAME_SELINUX, NULL, 0);
+		if (rc < 0 && rc != -ENODATA) {
+			if (rc == -EOPNOTSUPP)
+				printk(KERN_WARNING "SELinux: (dev %s, type "
+				       "%s) has no security xattr handler\n",
+				       sb->s_id, sb->s_type->name);
+			else
+				printk(KERN_WARNING "SELinux: (dev %s, type "
+				       "%s) getxattr errno %d\n", sb->s_id,
+				       sb->s_type->name, -rc);
+			goto out;
+		}
+	}
+
+	if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
+		printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
+		       sb->s_id, sb->s_type->name);
+
+	sbsec->flags |= SE_SBINITIALIZED;
+	if (selinux_is_sblabel_mnt(sb))
+		sbsec->flags |= SBLABEL_MNT;
+
+	/* Initialize the root inode. */
+	rc = inode_doinit_with_dentry(root_inode, root);
+
+	/* Initialize any other inodes associated with the superblock, e.g.
+	   inodes created prior to initial policy load or inodes created
+	   during get_sb by a pseudo filesystem that directly
+	   populates itself. */
+	spin_lock(&sbsec->isec_lock);
+next_inode:
+	if (!list_empty(&sbsec->isec_head)) {
+		struct inode_security_struct *isec =
+				list_entry(sbsec->isec_head.next,
+					   struct inode_security_struct, list);
+		struct inode *inode = isec->inode;
+		list_del_init(&isec->list);
+		spin_unlock(&sbsec->isec_lock);
+		inode = igrab(inode);
+		if (inode) {
+			if (!IS_PRIVATE(inode))
+				inode_doinit(inode);
+			iput(inode);
+		}
+		spin_lock(&sbsec->isec_lock);
+		goto next_inode;
+	}
+	spin_unlock(&sbsec->isec_lock);
+out:
+	return rc;
+}
+
+/*
+ * This function should allow an FS to ask what its mount security
+ * options were so it can use those later for submounts, displaying
+ * mount options, or whatever.
+ */
+static int selinux_get_mnt_opts(const struct super_block *sb,
+				struct security_mnt_opts *opts)
+{
+	int rc = 0, i;
+	struct superblock_security_struct *sbsec = sb->s_security;
+	char *context = NULL;
+	u32 len;
+	char tmp;
+
+	security_init_mnt_opts(opts);
+
+	if (!(sbsec->flags & SE_SBINITIALIZED))
+		return -EINVAL;
+
+	if (!ss_initialized)
+		return -EINVAL;
+
+	/* make sure we always check enough bits to cover the mask */
+	BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS));
+
+	tmp = sbsec->flags & SE_MNTMASK;
+	/* count the number of mount options for this sb */
+	for (i = 0; i < NUM_SEL_MNT_OPTS; i++) {
+		if (tmp & 0x01)
+			opts->num_mnt_opts++;
+		tmp >>= 1;
+	}
+	/* Check if the Label support flag is set */
+	if (sbsec->flags & SBLABEL_MNT)
+		opts->num_mnt_opts++;
+
+	opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
+	if (!opts->mnt_opts) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	opts->mnt_opts_flags = kcalloc(opts->num_mnt_opts, sizeof(int), GFP_ATOMIC);
+	if (!opts->mnt_opts_flags) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	i = 0;
+	if (sbsec->flags & FSCONTEXT_MNT) {
+		rc = security_sid_to_context(sbsec->sid, &context, &len);
+		if (rc)
+			goto out_free;
+		opts->mnt_opts[i] = context;
+		opts->mnt_opts_flags[i++] = FSCONTEXT_MNT;
+	}
+	if (sbsec->flags & CONTEXT_MNT) {
+		rc = security_sid_to_context(sbsec->mntpoint_sid, &context, &len);
+		if (rc)
+			goto out_free;
+		opts->mnt_opts[i] = context;
+		opts->mnt_opts_flags[i++] = CONTEXT_MNT;
+	}
+	if (sbsec->flags & DEFCONTEXT_MNT) {
+		rc = security_sid_to_context(sbsec->def_sid, &context, &len);
+		if (rc)
+			goto out_free;
+		opts->mnt_opts[i] = context;
+		opts->mnt_opts_flags[i++] = DEFCONTEXT_MNT;
+	}
+	if (sbsec->flags & ROOTCONTEXT_MNT) {
+		struct inode *root = d_backing_inode(sbsec->sb->s_root);
+		struct inode_security_struct *isec = root->i_security;
+
+		rc = security_sid_to_context(isec->sid, &context, &len);
+		if (rc)
+			goto out_free;
+		opts->mnt_opts[i] = context;
+		opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
+	}
+	if (sbsec->flags & SBLABEL_MNT) {
+		opts->mnt_opts[i] = NULL;
+		opts->mnt_opts_flags[i++] = SBLABEL_MNT;
+	}
+
+	BUG_ON(i != opts->num_mnt_opts);
+
+	return 0;
+
+out_free:
+	security_free_mnt_opts(opts);
+	return rc;
+}
+
+static int bad_option(struct superblock_security_struct *sbsec, char flag,
+		      u32 old_sid, u32 new_sid)
+{
+	char mnt_flags = sbsec->flags & SE_MNTMASK;
+
+	/* check if the old mount command had the same options */
+	if (sbsec->flags & SE_SBINITIALIZED)
+		if (!(sbsec->flags & flag) ||
+		    (old_sid != new_sid))
+			return 1;
+
+	/* check if we were passed the same options twice,
+	 * aka someone passed context=a,context=b
+	 */
+	if (!(sbsec->flags & SE_SBINITIALIZED))
+		if (mnt_flags & flag)
+			return 1;
+	return 0;
+}
+
+/*
+ * Allow filesystems with binary mount data to explicitly set mount point
+ * labeling information.
+ */
+static int selinux_set_mnt_opts(struct super_block *sb,
+				struct security_mnt_opts *opts,
+				unsigned long kern_flags,
+				unsigned long *set_kern_flags)
+{
+	const struct cred *cred = current_cred();
+	int rc = 0, i;
+	struct superblock_security_struct *sbsec = sb->s_security;
+	const char *name = sb->s_type->name;
+	struct inode *inode = d_backing_inode(sbsec->sb->s_root);
+	struct inode_security_struct *root_isec = inode->i_security;
+	u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
+	u32 defcontext_sid = 0;
+	char **mount_options = opts->mnt_opts;
+	int *flags = opts->mnt_opts_flags;
+	int num_opts = opts->num_mnt_opts;
+
+	mutex_lock(&sbsec->lock);
+
+	if (!ss_initialized) {
+		if (!num_opts) {
+			/* Defer initialization until selinux_complete_init,
+			   after the initial policy is loaded and the security
+			   server is ready to handle calls. */
+			goto out;
+		}
+		rc = -EINVAL;
+		printk(KERN_WARNING "SELinux: Unable to set superblock options "
+			"before the security server is initialized\n");
+		goto out;
+	}
+	if (kern_flags && !set_kern_flags) {
+		/* Specifying internal flags without providing a place to
+		 * return the results is not allowed. */
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Binary mount data FS will come through this function twice.  Once
+	 * from an explicit call and once from the generic calls from the vfs.
+	 * Since the generic VFS calls will not contain any security mount data
+	 * we need to skip the double mount verification.
+	 *
+	 * This does open a hole in which we will not notice if the first
+	 * mount using this sb set explicit options and a second mount using
+	 * this sb does not set any security options.  (The first options
+	 * will be used for both mounts)
+	 */
+	if ((sbsec->flags & SE_SBINITIALIZED) && (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)
+	    && (num_opts == 0))
+		goto out;
+
+	/*
+	 * parse the mount options, check if they are valid sids.
+	 * also check if someone is trying to mount the same sb more
+	 * than once with different security options.
+	 */
+	for (i = 0; i < num_opts; i++) {
+		u32 sid;
+
+		if (flags[i] == SBLABEL_MNT)
+			continue;
+		rc = security_context_str_to_sid(mount_options[i], &sid, GFP_KERNEL);
+		if (rc) {
+			printk(KERN_WARNING "SELinux: security_context_str_to_sid"
+			       "(%s) failed for (dev %s, type %s) errno=%d\n",
+			       mount_options[i], sb->s_id, name, rc);
+			goto out;
+		}
+		switch (flags[i]) {
+		case FSCONTEXT_MNT:
+			fscontext_sid = sid;
+
+			if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
+					fscontext_sid))
+				goto out_double_mount;
+
+			sbsec->flags |= FSCONTEXT_MNT;
+			break;
+		case CONTEXT_MNT:
+			context_sid = sid;
+
+			if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
+					context_sid))
+				goto out_double_mount;
+
+			sbsec->flags |= CONTEXT_MNT;
+			break;
+		case ROOTCONTEXT_MNT:
+			rootcontext_sid = sid;
+
+			if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
+					rootcontext_sid))
+				goto out_double_mount;
+
+			sbsec->flags |= ROOTCONTEXT_MNT;
+
+			break;
+		case DEFCONTEXT_MNT:
+			defcontext_sid = sid;
+
+			if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
+					defcontext_sid))
+				goto out_double_mount;
+
+			sbsec->flags |= DEFCONTEXT_MNT;
+
+			break;
+		default:
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (sbsec->flags & SE_SBINITIALIZED) {
+		/* previously mounted with options, but not on this attempt? */
+		if ((sbsec->flags & SE_MNTMASK) && !num_opts)
+			goto out_double_mount;
+		rc = 0;
+		goto out;
+	}
+
+	if (strcmp(sb->s_type->name, "proc") == 0)
+		sbsec->flags |= SE_SBPROC | SE_SBGENFS;
+
+	if (!strcmp(sb->s_type->name, "debugfs") ||
+	    !strcmp(sb->s_type->name, "sysfs") ||
+	    !strcmp(sb->s_type->name, "pstore"))
+		sbsec->flags |= SE_SBGENFS;
+
+	if (!sbsec->behavior) {
+		/*
+		 * Determine the labeling behavior to use for this
+		 * filesystem type.
+		 */
+		rc = security_fs_use(sb);
+		if (rc) {
+			printk(KERN_WARNING
+				"%s: security_fs_use(%s) returned %d\n",
+					__func__, sb->s_type->name, rc);
+			goto out;
+		}
+	}
+	/* sets the context of the superblock for the fs being mounted. */
+	if (fscontext_sid) {
+		rc = may_context_mount_sb_relabel(fscontext_sid, sbsec, cred);
+		if (rc)
+			goto out;
+
+		sbsec->sid = fscontext_sid;
+	}
+
+	/*
+	 * Switch to using mount point labeling behavior.
+	 * This sets the label used on all files below the mountpoint and
+	 * will set the superblock context if not already set.
+	 */
+	if (kern_flags & SECURITY_LSM_NATIVE_LABELS && !context_sid) {
+		sbsec->behavior = SECURITY_FS_USE_NATIVE;
+		*set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
+	}
+
+	if (context_sid) {
+		if (!fscontext_sid) {
+			rc = may_context_mount_sb_relabel(context_sid, sbsec,
+							  cred);
+			if (rc)
+				goto out;
+			sbsec->sid = context_sid;
+		} else {
+			rc = may_context_mount_inode_relabel(context_sid, sbsec,
+							     cred);
+			if (rc)
+				goto out;
+		}
+		if (!rootcontext_sid)
+			rootcontext_sid = context_sid;
+
+		sbsec->mntpoint_sid = context_sid;
+		sbsec->behavior = SECURITY_FS_USE_MNTPOINT;
+	}
+
+	if (rootcontext_sid) {
+		rc = may_context_mount_inode_relabel(rootcontext_sid, sbsec,
+						     cred);
+		if (rc)
+			goto out;
+
+		root_isec->sid = rootcontext_sid;
+		root_isec->initialized = 1;
+	}
+
+	if (defcontext_sid) {
+		if (sbsec->behavior != SECURITY_FS_USE_XATTR &&
+			sbsec->behavior != SECURITY_FS_USE_NATIVE) {
+			rc = -EINVAL;
+			printk(KERN_WARNING "SELinux: defcontext option is "
+			       "invalid for this filesystem type\n");
+			goto out;
+		}
+
+		if (defcontext_sid != sbsec->def_sid) {
+			rc = may_context_mount_inode_relabel(defcontext_sid,
+							     sbsec, cred);
+			if (rc)
+				goto out;
+		}
+
+		sbsec->def_sid = defcontext_sid;
+	}
+
+	rc = sb_finish_set_opts(sb);
+out:
+	mutex_unlock(&sbsec->lock);
+	return rc;
+out_double_mount:
+	rc = -EINVAL;
+	printk(KERN_WARNING "SELinux: mount invalid.  Same superblock, different "
+	       "security settings for (dev %s, type %s)\n", sb->s_id, name);
+	goto out;
+}
+
+static int selinux_cmp_sb_context(const struct super_block *oldsb,
+				    const struct super_block *newsb)
+{
+	struct superblock_security_struct *old = oldsb->s_security;
+	struct superblock_security_struct *new = newsb->s_security;
+	char oldflags = old->flags & SE_MNTMASK;
+	char newflags = new->flags & SE_MNTMASK;
+
+	if (oldflags != newflags)
+		goto mismatch;
+	if ((oldflags & FSCONTEXT_MNT) && old->sid != new->sid)
+		goto mismatch;
+	if ((oldflags & CONTEXT_MNT) && old->mntpoint_sid != new->mntpoint_sid)
+		goto mismatch;
+	if ((oldflags & DEFCONTEXT_MNT) && old->def_sid != new->def_sid)
+		goto mismatch;
+	if (oldflags & ROOTCONTEXT_MNT) {
+		struct inode_security_struct *oldroot = d_backing_inode(oldsb->s_root)->i_security;
+		struct inode_security_struct *newroot = d_backing_inode(newsb->s_root)->i_security;
+		if (oldroot->sid != newroot->sid)
+			goto mismatch;
+	}
+	return 0;
+mismatch:
+	printk(KERN_WARNING "SELinux: mount invalid.  Same superblock, "
+			    "different security settings for (dev %s, "
+			    "type %s)\n", newsb->s_id, newsb->s_type->name);
+	return -EBUSY;
+}
+
+static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
+					struct super_block *newsb)
+{
+	const struct superblock_security_struct *oldsbsec = oldsb->s_security;
+	struct superblock_security_struct *newsbsec = newsb->s_security;
+
+	int set_fscontext =	(oldsbsec->flags & FSCONTEXT_MNT);
+	int set_context =	(oldsbsec->flags & CONTEXT_MNT);
+	int set_rootcontext =	(oldsbsec->flags & ROOTCONTEXT_MNT);
+
+	/*
+	 * if the parent was able to be mounted it clearly had no special lsm
+	 * mount options.  thus we can safely deal with this superblock later
+	 */
+	if (!ss_initialized)
+		return 0;
+
+	/* how can we clone if the old one wasn't set up?? */
+	BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
+
+	/* if fs is reusing a sb, make sure that the contexts match */
+	if (newsbsec->flags & SE_SBINITIALIZED)
+		return selinux_cmp_sb_context(oldsb, newsb);
+
+	mutex_lock(&newsbsec->lock);
+
+	newsbsec->flags = oldsbsec->flags;
+
+	newsbsec->sid = oldsbsec->sid;
+	newsbsec->def_sid = oldsbsec->def_sid;
+	newsbsec->behavior = oldsbsec->behavior;
+
+	if (set_context) {
+		u32 sid = oldsbsec->mntpoint_sid;
+
+		if (!set_fscontext)
+			newsbsec->sid = sid;
+		if (!set_rootcontext) {
+			struct inode *newinode = d_backing_inode(newsb->s_root);
+			struct inode_security_struct *newisec = newinode->i_security;
+			newisec->sid = sid;
+		}
+		newsbsec->mntpoint_sid = sid;
+	}
+	if (set_rootcontext) {
+		const struct inode *oldinode = d_backing_inode(oldsb->s_root);
+		const struct inode_security_struct *oldisec = oldinode->i_security;
+		struct inode *newinode = d_backing_inode(newsb->s_root);
+		struct inode_security_struct *newisec = newinode->i_security;
+
+		newisec->sid = oldisec->sid;
+	}
+
+	sb_finish_set_opts(newsb);
+	mutex_unlock(&newsbsec->lock);
+	return 0;
+}
+
+static int selinux_parse_opts_str(char *options,
+				  struct security_mnt_opts *opts)
+{
+	char *p;
+	char *context = NULL, *defcontext = NULL;
+	char *fscontext = NULL, *rootcontext = NULL;
+	int rc, num_mnt_opts = 0;
+
+	opts->num_mnt_opts = 0;
+
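+	/*
+	 * Note that the options arrive '|'-separated rather than
+	 * ','-separated: selinux_sb_copy_data() has already pulled the
+	 * SELinux options out of the raw mount data and joined them with
+	 * '|', e.g. "context=system_u:object_r:tmp_t:s0|fscontext=...".
+	 */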
+	/* Standard string-based options. */
+	while ((p = strsep(&options, "|")) != NULL) {
+		int token;
+		substring_t args[MAX_OPT_ARGS];
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+
+		switch (token) {
+		case Opt_context:
+			if (context || defcontext) {
+				rc = -EINVAL;
+				printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
+				goto out_err;
+			}
+			context = match_strdup(&args[0]);
+			if (!context) {
+				rc = -ENOMEM;
+				goto out_err;
+			}
+			break;
+
+		case Opt_fscontext:
+			if (fscontext) {
+				rc = -EINVAL;
+				printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
+				goto out_err;
+			}
+			fscontext = match_strdup(&args[0]);
+			if (!fscontext) {
+				rc = -ENOMEM;
+				goto out_err;
+			}
+			break;
+
+		case Opt_rootcontext:
+			if (rootcontext) {
+				rc = -EINVAL;
+				printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
+				goto out_err;
+			}
+			rootcontext = match_strdup(&args[0]);
+			if (!rootcontext) {
+				rc = -ENOMEM;
+				goto out_err;
+			}
+			break;
+
+		case Opt_defcontext:
+			if (context || defcontext) {
+				rc = -EINVAL;
+				printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
+				goto out_err;
+			}
+			defcontext = match_strdup(&args[0]);
+			if (!defcontext) {
+				rc = -ENOMEM;
+				goto out_err;
+			}
+			break;
+		case Opt_labelsupport:
+			break;
+		default:
+			rc = -EINVAL;
+			printk(KERN_WARNING "SELinux:  unknown mount option\n");
+			goto out_err;
+
+		}
+	}
+
+	rc = -ENOMEM;
+	opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_ATOMIC);
+	if (!opts->mnt_opts)
+		goto out_err;
+
+	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC);
+	if (!opts->mnt_opts_flags) {
+		kfree(opts->mnt_opts);
+		goto out_err;
+	}
+
+	if (fscontext) {
+		opts->mnt_opts[num_mnt_opts] = fscontext;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSCONTEXT_MNT;
+	}
+	if (context) {
+		opts->mnt_opts[num_mnt_opts] = context;
+		opts->mnt_opts_flags[num_mnt_opts++] = CONTEXT_MNT;
+	}
+	if (rootcontext) {
+		opts->mnt_opts[num_mnt_opts] = rootcontext;
+		opts->mnt_opts_flags[num_mnt_opts++] = ROOTCONTEXT_MNT;
+	}
+	if (defcontext) {
+		opts->mnt_opts[num_mnt_opts] = defcontext;
+		opts->mnt_opts_flags[num_mnt_opts++] = DEFCONTEXT_MNT;
+	}
+
+	opts->num_mnt_opts = num_mnt_opts;
+	return 0;
+
+out_err:
+	kfree(context);
+	kfree(defcontext);
+	kfree(fscontext);
+	kfree(rootcontext);
+	return rc;
+}
+/*
+ * Parse the string mount options and use them to set up the
+ * superblock security structure (sbsec).
+ */
+static int superblock_doinit(struct super_block *sb, void *data)
+{
+	int rc = 0;
+	char *options = data;
+	struct security_mnt_opts opts;
+
+	security_init_mnt_opts(&opts);
+
+	if (!data)
+		goto out;
+
+	BUG_ON(sb->s_type->fs_flags & FS_BINARY_MOUNTDATA);
+
+	rc = selinux_parse_opts_str(options, &opts);
+	if (rc)
+		goto out_err;
+
+out:
+	rc = selinux_set_mnt_opts(sb, &opts, 0, NULL);
+
+out_err:
+	security_free_mnt_opts(&opts);
+	return rc;
+}
+
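+/*
+ * Write the mount options back out, e.g. for /proc/mounts.  A value
+ * that itself contains a ',' (such as an MLS category set like
+ * "s0:c1,c2") is wrapped in double quotes so the output remains
+ * parseable as an option list.
+ */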
+static void selinux_write_opts(struct seq_file *m,
+			       struct security_mnt_opts *opts)
+{
+	int i;
+	char *prefix;
+
+	for (i = 0; i < opts->num_mnt_opts; i++) {
+		char *has_comma;
+
+		if (opts->mnt_opts[i])
+			has_comma = strchr(opts->mnt_opts[i], ',');
+		else
+			has_comma = NULL;
+
+		switch (opts->mnt_opts_flags[i]) {
+		case CONTEXT_MNT:
+			prefix = CONTEXT_STR;
+			break;
+		case FSCONTEXT_MNT:
+			prefix = FSCONTEXT_STR;
+			break;
+		case ROOTCONTEXT_MNT:
+			prefix = ROOTCONTEXT_STR;
+			break;
+		case DEFCONTEXT_MNT:
+			prefix = DEFCONTEXT_STR;
+			break;
+		case SBLABEL_MNT:
+			seq_putc(m, ',');
+			seq_puts(m, LABELSUPP_STR);
+			continue;
+		default:
+			BUG();
+			return;
+		}
+		/* we need a comma before each option */
+		seq_putc(m, ',');
+		seq_puts(m, prefix);
+		if (has_comma)
+			seq_putc(m, '\"');
+		seq_escape(m, opts->mnt_opts[i], "\"\n\\");
+		if (has_comma)
+			seq_putc(m, '\"');
+	}
+}
+
+static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb)
+{
+	struct security_mnt_opts opts;
+	int rc;
+
+	rc = selinux_get_mnt_opts(sb, &opts);
+	if (rc) {
+		/* before policy load we may get EINVAL, don't show anything */
+		if (rc == -EINVAL)
+			rc = 0;
+		return rc;
+	}
+
+	selinux_write_opts(m, &opts);
+
+	security_free_mnt_opts(&opts);
+
+	return rc;
+}
+
+static inline u16 inode_mode_to_security_class(umode_t mode)
+{
+	switch (mode & S_IFMT) {
+	case S_IFSOCK:
+		return SECCLASS_SOCK_FILE;
+	case S_IFLNK:
+		return SECCLASS_LNK_FILE;
+	case S_IFREG:
+		return SECCLASS_FILE;
+	case S_IFBLK:
+		return SECCLASS_BLK_FILE;
+	case S_IFDIR:
+		return SECCLASS_DIR;
+	case S_IFCHR:
+		return SECCLASS_CHR_FILE;
+	case S_IFIFO:
+		return SECCLASS_FIFO_FILE;
+	}
+
+	return SECCLASS_FILE;
+}
+
+static inline int default_protocol_stream(int protocol)
+{
+	return (protocol == IPPROTO_IP || protocol == IPPROTO_TCP);
+}
+
+static inline int default_protocol_dgram(int protocol)
+{
+	return (protocol == IPPROTO_IP || protocol == IPPROTO_UDP);
+}
+
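+/*
+ * For example, socket(AF_INET, SOCK_STREAM, 0) maps to
+ * SECCLASS_TCP_SOCKET, a raw IP socket maps to SECCLASS_RAWIP_SOCKET,
+ * and a netlink protocol without a dedicated class falls back to the
+ * generic SECCLASS_NETLINK_SOCKET.
+ */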
+static inline u16 socket_type_to_security_class(int family, int type, int protocol)
+{
+	switch (family) {
+	case PF_UNIX:
+		switch (type) {
+		case SOCK_STREAM:
+		case SOCK_SEQPACKET:
+			return SECCLASS_UNIX_STREAM_SOCKET;
+		case SOCK_DGRAM:
+			return SECCLASS_UNIX_DGRAM_SOCKET;
+		}
+		break;
+	case PF_INET:
+	case PF_INET6:
+		switch (type) {
+		case SOCK_STREAM:
+			if (default_protocol_stream(protocol))
+				return SECCLASS_TCP_SOCKET;
+			else
+				return SECCLASS_RAWIP_SOCKET;
+		case SOCK_DGRAM:
+			if (default_protocol_dgram(protocol))
+				return SECCLASS_UDP_SOCKET;
+			else
+				return SECCLASS_RAWIP_SOCKET;
+		case SOCK_DCCP:
+			return SECCLASS_DCCP_SOCKET;
+		default:
+			return SECCLASS_RAWIP_SOCKET;
+		}
+		break;
+	case PF_NETLINK:
+		switch (protocol) {
+		case NETLINK_ROUTE:
+			return SECCLASS_NETLINK_ROUTE_SOCKET;
+		case NETLINK_SOCK_DIAG:
+			return SECCLASS_NETLINK_TCPDIAG_SOCKET;
+		case NETLINK_NFLOG:
+			return SECCLASS_NETLINK_NFLOG_SOCKET;
+		case NETLINK_XFRM:
+			return SECCLASS_NETLINK_XFRM_SOCKET;
+		case NETLINK_SELINUX:
+			return SECCLASS_NETLINK_SELINUX_SOCKET;
+		case NETLINK_ISCSI:
+			return SECCLASS_NETLINK_ISCSI_SOCKET;
+		case NETLINK_AUDIT:
+			return SECCLASS_NETLINK_AUDIT_SOCKET;
+		case NETLINK_FIB_LOOKUP:
+			return SECCLASS_NETLINK_FIB_LOOKUP_SOCKET;
+		case NETLINK_CONNECTOR:
+			return SECCLASS_NETLINK_CONNECTOR_SOCKET;
+		case NETLINK_NETFILTER:
+			return SECCLASS_NETLINK_NETFILTER_SOCKET;
+		case NETLINK_DNRTMSG:
+			return SECCLASS_NETLINK_DNRT_SOCKET;
+		case NETLINK_KOBJECT_UEVENT:
+			return SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET;
+		case NETLINK_GENERIC:
+			return SECCLASS_NETLINK_GENERIC_SOCKET;
+		case NETLINK_SCSITRANSPORT:
+			return SECCLASS_NETLINK_SCSITRANSPORT_SOCKET;
+		case NETLINK_RDMA:
+			return SECCLASS_NETLINK_RDMA_SOCKET;
+		case NETLINK_CRYPTO:
+			return SECCLASS_NETLINK_CRYPTO_SOCKET;
+		default:
+			return SECCLASS_NETLINK_SOCKET;
+		}
+	case PF_PACKET:
+		return SECCLASS_PACKET_SOCKET;
+	case PF_KEY:
+		return SECCLASS_KEY_SOCKET;
+	case PF_APPLETALK:
+		return SECCLASS_APPLETALK_SOCKET;
+	}
+
+	return SECCLASS_SOCKET;
+}
+
+static int selinux_genfs_get_sid(struct dentry *dentry,
+				 u16 tclass,
+				 u16 flags,
+				 u32 *sid)
+{
+	int rc;
+	struct super_block *sb = dentry->d_inode->i_sb;
+	char *buffer, *path;
+
+	buffer = (char *)__get_free_page(GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	path = dentry_path_raw(dentry, buffer, PAGE_SIZE);
+	if (IS_ERR(path))
+		rc = PTR_ERR(path);
+	else {
+		if (flags & SE_SBPROC) {
+			/* each process gets a /proc/PID/ entry. Strip off the
+			 * PID part to get a valid SELinux label.
+			 * e.g. /proc/1/net/rpc/nfs -> /net/rpc/nfs */
+			while (path[1] >= '0' && path[1] <= '9') {
+				path[1] = '/';
+				path++;
+			}
+		}
+		rc = security_genfs_sid(sb->s_type->name, path, tclass, sid);
+	}
+	free_page((unsigned long)buffer);
+	return rc;
+}
+
+/* The inode's security attributes must be initialized before first use. */
+static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry)
+{
+	struct superblock_security_struct *sbsec = NULL;
+	struct inode_security_struct *isec = inode->i_security;
+	u32 sid;
+	struct dentry *dentry;
+#define INITCONTEXTLEN 255
+	char *context = NULL;
+	unsigned len = 0;
+	int rc = 0;
+
+	if (isec->initialized)
+		goto out;
+
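+	/*
+	 * Double-checked locking: re-test isec->initialized under the
+	 * lock in case another task initialized the inode while we
+	 * were waiting for it.
+	 */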
+	mutex_lock(&isec->lock);
+	if (isec->initialized)
+		goto out_unlock;
+
+	sbsec = inode->i_sb->s_security;
+	if (!(sbsec->flags & SE_SBINITIALIZED)) {
+		/* Defer initialization until selinux_complete_init,
+		   after the initial policy is loaded and the security
+		   server is ready to handle calls. */
+		spin_lock(&sbsec->isec_lock);
+		if (list_empty(&isec->list))
+			list_add(&isec->list, &sbsec->isec_head);
+		spin_unlock(&sbsec->isec_lock);
+		goto out_unlock;
+	}
+
+	switch (sbsec->behavior) {
+	case SECURITY_FS_USE_NATIVE:
+		break;
+	case SECURITY_FS_USE_XATTR:
+		if (!inode->i_op->getxattr) {
+			isec->sid = sbsec->def_sid;
+			break;
+		}
+
+		/* Need a dentry, since the xattr API requires one.
+		   Life would be simpler if we could just pass the inode. */
+		if (opt_dentry) {
+			/* Called from d_instantiate or d_splice_alias. */
+			dentry = dget(opt_dentry);
+		} else {
+			/* Called from selinux_complete_init, try to find a dentry. */
+			dentry = d_find_alias(inode);
+		}
+		if (!dentry) {
+			/*
+			 * This can be hit on boot when a file is accessed
+			 * before the policy is loaded.  When we load policy we
+			 * may find inodes that have no dentry on the
+			 * sbsec->isec_head list.  No reason to complain as these
+			 * will get fixed up the next time we go through
+			 * inode_doinit with a dentry, before these inodes could
+			 * be used again by userspace.
+			 */
+			goto out_unlock;
+		}
+
+		len = INITCONTEXTLEN;
+		context = kmalloc(len+1, GFP_NOFS);
+		if (!context) {
+			rc = -ENOMEM;
+			dput(dentry);
+			goto out_unlock;
+		}
+		context[len] = '\0';
+		rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX,
+					   context, len);
+		if (rc == -ERANGE) {
+			kfree(context);
+
+			/* Need a larger buffer.  Query for the right size. */
+			rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX,
+						   NULL, 0);
+			if (rc < 0) {
+				dput(dentry);
+				goto out_unlock;
+			}
+			len = rc;
+			context = kmalloc(len+1, GFP_NOFS);
+			if (!context) {
+				rc = -ENOMEM;
+				dput(dentry);
+				goto out_unlock;
+			}
+			context[len] = '\0';
+			rc = inode->i_op->getxattr(dentry,
+						   XATTR_NAME_SELINUX,
+						   context, len);
+		}
+		dput(dentry);
+		if (rc < 0) {
+			if (rc != -ENODATA) {
+				printk(KERN_WARNING "SELinux: %s:  getxattr returned "
+				       "%d for dev=%s ino=%ld\n", __func__,
+				       -rc, inode->i_sb->s_id, inode->i_ino);
+				kfree(context);
+				goto out_unlock;
+			}
+			/* Map ENODATA to the default file SID */
+			sid = sbsec->def_sid;
+			rc = 0;
+		} else {
+			rc = security_context_to_sid_default(context, rc, &sid,
+							     sbsec->def_sid,
+							     GFP_NOFS);
+			if (rc) {
+				char *dev = inode->i_sb->s_id;
+				unsigned long ino = inode->i_ino;
+
+				if (rc == -EINVAL) {
+					if (printk_ratelimit())
+						printk(KERN_NOTICE "SELinux: inode=%lu on dev=%s was found to have an invalid "
+							"context=%s.  This indicates you may need to relabel the inode or the "
+							"filesystem in question.\n", ino, dev, context);
+				} else {
+					printk(KERN_WARNING "SELinux: %s:  context_to_sid(%s) "
+					       "returned %d for dev=%s ino=%ld\n",
+					       __func__, context, -rc, dev, ino);
+				}
+				kfree(context);
+				/* Leave with the unlabeled SID */
+				rc = 0;
+				break;
+			}
+		}
+		kfree(context);
+		isec->sid = sid;
+		break;
+	case SECURITY_FS_USE_TASK:
+		isec->sid = isec->task_sid;
+		break;
+	case SECURITY_FS_USE_TRANS:
+		/* Default to the fs SID. */
+		isec->sid = sbsec->sid;
+
+		/* Try to obtain a transition SID. */
+		isec->sclass = inode_mode_to_security_class(inode->i_mode);
+		rc = security_transition_sid(isec->task_sid, sbsec->sid,
+					     isec->sclass, NULL, &sid);
+		if (rc)
+			goto out_unlock;
+		isec->sid = sid;
+		break;
+	case SECURITY_FS_USE_MNTPOINT:
+		isec->sid = sbsec->mntpoint_sid;
+		break;
+	default:
+		/* Default to the fs superblock SID. */
+		isec->sid = sbsec->sid;
+
+		if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) {
+			/* We must have a dentry to determine the label on
+			 * procfs inodes */
+			if (opt_dentry)
+				/* Called from d_instantiate or
+				 * d_splice_alias. */
+				dentry = dget(opt_dentry);
+			else
+				/* Called from selinux_complete_init, try to
+				 * find a dentry. */
+				dentry = d_find_alias(inode);
+			/*
+			 * This can be hit on boot when a file is accessed
+			 * before the policy is loaded.  When we load policy we
+			 * may find inodes that have no dentry on the
+			 * sbsec->isec_head list.  No reason to complain as
+			 * these will get fixed up the next time we go through
+			 * inode_doinit() with a dentry, before these inodes
+			 * could be used again by userspace.
+			 */
+			if (!dentry)
+				goto out_unlock;
+			isec->sclass = inode_mode_to_security_class(inode->i_mode);
+			rc = selinux_genfs_get_sid(dentry, isec->sclass,
+						   sbsec->flags, &sid);
+			dput(dentry);
+			if (rc)
+				goto out_unlock;
+			isec->sid = sid;
+		}
+		break;
+	}
+
+	isec->initialized = 1;
+
+out_unlock:
+	mutex_unlock(&isec->lock);
+out:
+	if (isec->sclass == SECCLASS_FILE)
+		isec->sclass = inode_mode_to_security_class(inode->i_mode);
+	return rc;
+}
+
+/* Convert a Linux signal to an access vector. */
+static inline u32 signal_to_av(int sig)
+{
+	u32 perm = 0;
+
+	switch (sig) {
+	case SIGCHLD:
+		/* Commonly granted from child to parent. */
+		perm = PROCESS__SIGCHLD;
+		break;
+	case SIGKILL:
+		/* Cannot be caught or ignored */
+		perm = PROCESS__SIGKILL;
+		break;
+	case SIGSTOP:
+		/* Cannot be caught or ignored */
+		perm = PROCESS__SIGSTOP;
+		break;
+	default:
+		/* All other signals. */
+		perm = PROCESS__SIGNAL;
+		break;
+	}
+
+	return perm;
+}
+
+/*
+ * Check permission between a pair of credentials, e.g. for the
+ * fork check, ptrace check, etc.
+ */
+static int cred_has_perm(const struct cred *actor,
+			 const struct cred *target,
+			 u32 perms)
+{
+	u32 asid = cred_sid(actor), tsid = cred_sid(target);
+
+	return avc_has_perm(asid, tsid, SECCLASS_PROCESS, perms, NULL);
+}
+
+/*
+ * Check permission between a pair of tasks, e.g. signal checks,
+ * fork check, ptrace check, etc.
+ * tsk1 is the actor and tsk2 is the target
+ * - this uses the default subjective creds of tsk1
+ */
+static int task_has_perm(const struct task_struct *tsk1,
+			 const struct task_struct *tsk2,
+			 u32 perms)
+{
+	const struct task_security_struct *__tsec1, *__tsec2;
+	u32 sid1, sid2;
+
+	rcu_read_lock();
+	__tsec1 = __task_cred(tsk1)->security;
+	sid1 = __tsec1->sid;
+	__tsec2 = __task_cred(tsk2)->security;
+	sid2 = __tsec2->sid;
+	rcu_read_unlock();
+	return avc_has_perm(sid1, sid2, SECCLASS_PROCESS, perms, NULL);
+}
+
+/*
+ * Check permission between current and another task, e.g. signal checks,
+ * fork check, ptrace check, etc.
+ * current is the actor and tsk is the target
+ * - this uses current's subjective creds
+ */
+static int current_has_perm(const struct task_struct *tsk,
+			    u32 perms)
+{
+	u32 sid, tsid;
+
+	sid = current_sid();
+	tsid = task_sid(tsk);
+	return avc_has_perm(sid, tsid, SECCLASS_PROCESS, perms, NULL);
+}
+
+#if CAP_LAST_CAP > 63
+#error Fix SELinux to handle capabilities > 63.
+#endif
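+
+/*
+ * Capability bits span two 32-bit policy classes: CAP_TO_INDEX()
+ * selects "capability" (caps 0-31) or "capability2" (caps 32-63), and
+ * CAP_TO_MASK() yields the bit within the selected word.
+ */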
+
+/* Check whether a task is allowed to use a capability. */
+static int cred_has_capability(const struct cred *cred,
+			       int cap, int audit)
+{
+	struct common_audit_data ad;
+	struct av_decision avd;
+	u16 sclass;
+	u32 sid = cred_sid(cred);
+	u32 av = CAP_TO_MASK(cap);
+	int rc;
+
+	ad.type = LSM_AUDIT_DATA_CAP;
+	ad.u.cap = cap;
+
+	switch (CAP_TO_INDEX(cap)) {
+	case 0:
+		sclass = SECCLASS_CAPABILITY;
+		break;
+	case 1:
+		sclass = SECCLASS_CAPABILITY2;
+		break;
+	default:
+		printk(KERN_ERR
+		       "SELinux:  out of range capability %d\n", cap);
+		BUG();
+		return -EINVAL;
+	}
+
+	rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
+	if (audit == SECURITY_CAP_AUDIT) {
+		int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0);
+		if (rc2)
+			return rc2;
+	}
+	return rc;
+}
+
+/* Check whether a task is allowed to use a system operation. */
+static int task_has_system(struct task_struct *tsk,
+			   u32 perms)
+{
+	u32 sid = task_sid(tsk);
+
+	return avc_has_perm(sid, SECINITSID_KERNEL,
+			    SECCLASS_SYSTEM, perms, NULL);
+}
+
+/* Check whether a task has a particular permission to an inode.
+   The 'adp' parameter is optional and allows other audit
+   data to be passed (e.g. the dentry). */
+static int inode_has_perm(const struct cred *cred,
+			  struct inode *inode,
+			  u32 perms,
+			  struct common_audit_data *adp)
+{
+	struct inode_security_struct *isec;
+	u32 sid;
+
+	validate_creds(cred);
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	sid = cred_sid(cred);
+	isec = inode->i_security;
+
+	return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
+}
+
+/* Same as inode_has_perm, but pass explicit audit data containing
+   the dentry to help the auditing code to more easily generate the
+   pathname if needed. */
+static inline int dentry_has_perm(const struct cred *cred,
+				  struct dentry *dentry,
+				  u32 av)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	struct common_audit_data ad;
+
+	ad.type = LSM_AUDIT_DATA_DENTRY;
+	ad.u.dentry = dentry;
+	return inode_has_perm(cred, inode, av, &ad);
+}
+
+/* Same as inode_has_perm, but pass explicit audit data containing
+   the path to help the auditing code to more easily generate the
+   pathname if needed. */
+static inline int path_has_perm(const struct cred *cred,
+				const struct path *path,
+				u32 av)
+{
+	struct inode *inode = d_backing_inode(path->dentry);
+	struct common_audit_data ad;
+
+	ad.type = LSM_AUDIT_DATA_PATH;
+	ad.u.path = *path;
+	return inode_has_perm(cred, inode, av, &ad);
+}
+
+/* Same as path_has_perm, but uses the inode from the file struct. */
+static inline int file_path_has_perm(const struct cred *cred,
+				     struct file *file,
+				     u32 av)
+{
+	struct common_audit_data ad;
+
+	ad.type = LSM_AUDIT_DATA_PATH;
+	ad.u.path = file->f_path;
+	return inode_has_perm(cred, file_inode(file), av, &ad);
+}
+
+/* Check whether a task can use an open file descriptor to
+   access an inode in a given way.  Check access to the
+   descriptor itself, and then use dentry_has_perm to
+   check a particular permission to the file.
+   Access to the descriptor is implicitly granted if it
+   has the same SID as the process.  If av is zero, then
+   access to the file is not checked, e.g. for cases
+   where only the descriptor is affected, like seek. */
+static int file_has_perm(const struct cred *cred,
+			 struct file *file,
+			 u32 av)
+{
+	struct file_security_struct *fsec = file->f_security;
+	struct inode *inode = file_inode(file);
+	struct common_audit_data ad;
+	u32 sid = cred_sid(cred);
+	int rc;
+
+	ad.type = LSM_AUDIT_DATA_PATH;
+	ad.u.path = file->f_path;
+
+	if (sid != fsec->sid) {
+		rc = avc_has_perm(sid, fsec->sid,
+				  SECCLASS_FD,
+				  FD__USE,
+				  &ad);
+		if (rc)
+			goto out;
+	}
+
+	/* av is zero if only checking access to the descriptor. */
+	rc = 0;
+	if (av)
+		rc = inode_has_perm(cred, inode, av, &ad);
+
+out:
+	return rc;
+}
+
+/*
+ * Determine the label for an inode that might be unioned.
+ */
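+/*
+ * Precedence: a mountpoint-labeled superblock (context= mount option)
+ * wins, then a create SID the task has set explicitly (e.g. via
+ * /proc/self/attr/fscreate), and finally the policy's type-transition
+ * rules for this (task SID, directory SID, class) triple.
+ */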
+static int selinux_determine_inode_label(const struct inode *dir,
+					 const struct qstr *name,
+					 u16 tclass,
+					 u32 *_new_isid)
+{
+	const struct superblock_security_struct *sbsec = dir->i_sb->s_security;
+	const struct inode_security_struct *dsec = dir->i_security;
+	const struct task_security_struct *tsec = current_security();
+
+	if ((sbsec->flags & SE_SBINITIALIZED) &&
+	    (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) {
+		*_new_isid = sbsec->mntpoint_sid;
+	} else if ((sbsec->flags & SBLABEL_MNT) &&
+		   tsec->create_sid) {
+		*_new_isid = tsec->create_sid;
+	} else {
+		return security_transition_sid(tsec->sid, dsec->sid, tclass,
+					       name, _new_isid);
+	}
+
+	return 0;
+}
+
+/* Check whether a task can create a file. */
+static int may_create(struct inode *dir,
+		      struct dentry *dentry,
+		      u16 tclass)
+{
+	const struct task_security_struct *tsec = current_security();
+	struct inode_security_struct *dsec;
+	struct superblock_security_struct *sbsec;
+	u32 sid, newsid;
+	struct common_audit_data ad;
+	int rc;
+
+	dsec = dir->i_security;
+	sbsec = dir->i_sb->s_security;
+
+	sid = tsec->sid;
+
+	ad.type = LSM_AUDIT_DATA_DENTRY;
+	ad.u.dentry = dentry;
+
+	rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
+			  DIR__ADD_NAME | DIR__SEARCH,
+			  &ad);
+	if (rc)
+		return rc;
+
+	rc = selinux_determine_inode_label(dir, &dentry->d_name, tclass,
+					   &newsid);
+	if (rc)
+		return rc;
+
+	rc = avc_has_perm(sid, newsid, tclass, FILE__CREATE, &ad);
+	if (rc)
+		return rc;
+
+	return avc_has_perm(newsid, sbsec->sid,
+			    SECCLASS_FILESYSTEM,
+			    FILESYSTEM__ASSOCIATE, &ad);
+}
+
+/* Check whether a task can create a key. */
+static int may_create_key(u32 ksid,
+			  struct task_struct *ctx)
+{
+	u32 sid = task_sid(ctx);
+
+	return avc_has_perm(sid, ksid, SECCLASS_KEY, KEY__CREATE, NULL);
+}
+
+#define MAY_LINK	0
+#define MAY_UNLINK	1
+#define MAY_RMDIR	2
+
+/* Check whether a task can link, unlink, or rmdir a file/directory. */
+static int may_link(struct inode *dir,
+		    struct dentry *dentry,
+		    int kind)
+
+{
+	struct inode_security_struct *dsec, *isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+	u32 av;
+	int rc;
+
+	dsec = dir->i_security;
+	isec = d_backing_inode(dentry)->i_security;
+
+	ad.type = LSM_AUDIT_DATA_DENTRY;
+	ad.u.dentry = dentry;
+
+	av = DIR__SEARCH;
+	av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
+	rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR, av, &ad);
+	if (rc)
+		return rc;
+
+	switch (kind) {
+	case MAY_LINK:
+		av = FILE__LINK;
+		break;
+	case MAY_UNLINK:
+		av = FILE__UNLINK;
+		break;
+	case MAY_RMDIR:
+		av = DIR__RMDIR;
+		break;
+	default:
+		printk(KERN_WARNING "SELinux: %s:  unrecognized kind %d\n",
+			__func__, kind);
+		return 0;
+	}
+
+	rc = avc_has_perm(sid, isec->sid, isec->sclass, av, &ad);
+	return rc;
+}
+
+static inline int may_rename(struct inode *old_dir,
+			     struct dentry *old_dentry,
+			     struct inode *new_dir,
+			     struct dentry *new_dentry)
+{
+	struct inode_security_struct *old_dsec, *new_dsec, *old_isec, *new_isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+	u32 av;
+	int old_is_dir, new_is_dir;
+	int rc;
+
+	old_dsec = old_dir->i_security;
+	old_isec = d_backing_inode(old_dentry)->i_security;
+	old_is_dir = d_is_dir(old_dentry);
+	new_dsec = new_dir->i_security;
+
+	ad.type = LSM_AUDIT_DATA_DENTRY;
+
+	ad.u.dentry = old_dentry;
+	rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
+			  DIR__REMOVE_NAME | DIR__SEARCH, &ad);
+	if (rc)
+		return rc;
+	rc = avc_has_perm(sid, old_isec->sid,
+			  old_isec->sclass, FILE__RENAME, &ad);
+	if (rc)
+		return rc;
+	if (old_is_dir && new_dir != old_dir) {
+		rc = avc_has_perm(sid, old_isec->sid,
+				  old_isec->sclass, DIR__REPARENT, &ad);
+		if (rc)
+			return rc;
+	}
+
+	ad.u.dentry = new_dentry;
+	av = DIR__ADD_NAME | DIR__SEARCH;
+	if (d_is_positive(new_dentry))
+		av |= DIR__REMOVE_NAME;
+	rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
+	if (rc)
+		return rc;
+	if (d_is_positive(new_dentry)) {
+		new_isec = d_backing_inode(new_dentry)->i_security;
+		new_is_dir = d_is_dir(new_dentry);
+		rc = avc_has_perm(sid, new_isec->sid,
+				  new_isec->sclass,
+				  (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+/* Check whether a task can perform a filesystem operation. */
+static int superblock_has_perm(const struct cred *cred,
+			       struct super_block *sb,
+			       u32 perms,
+			       struct common_audit_data *ad)
+{
+	struct superblock_security_struct *sbsec;
+	u32 sid = cred_sid(cred);
+
+	sbsec = sb->s_security;
+	return avc_has_perm(sid, sbsec->sid, SECCLASS_FILESYSTEM, perms, ad);
+}
+
+/* Convert a Linux mode and permission mask to an access vector. */
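+/*
+ * For example, MAY_READ|MAY_WRITE on a regular file becomes
+ * FILE__READ|FILE__WRITE, while MAY_EXEC on a directory becomes
+ * DIR__SEARCH.  MAY_APPEND deliberately suppresses FILE__WRITE so that
+ * append-only access can be granted without full write access.
+ */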
+static inline u32 file_mask_to_av(int mode, int mask)
+{
+	u32 av = 0;
+
+	if (!S_ISDIR(mode)) {
+		if (mask & MAY_EXEC)
+			av |= FILE__EXECUTE;
+		if (mask & MAY_READ)
+			av |= FILE__READ;
+
+		if (mask & MAY_APPEND)
+			av |= FILE__APPEND;
+		else if (mask & MAY_WRITE)
+			av |= FILE__WRITE;
+
+	} else {
+		if (mask & MAY_EXEC)
+			av |= DIR__SEARCH;
+		if (mask & MAY_WRITE)
+			av |= DIR__WRITE;
+		if (mask & MAY_READ)
+			av |= DIR__READ;
+	}
+
+	return av;
+}
+
+/* Convert a Linux file to an access vector. */
+static inline u32 file_to_av(struct file *file)
+{
+	u32 av = 0;
+
+	if (file->f_mode & FMODE_READ)
+		av |= FILE__READ;
+	if (file->f_mode & FMODE_WRITE) {
+		if (file->f_flags & O_APPEND)
+			av |= FILE__APPEND;
+		else
+			av |= FILE__WRITE;
+	}
+	if (!av) {
+		/*
+		 * Special file opened with flags 3 for ioctl-only use.
+		 */
+		av = FILE__IOCTL;
+	}
+
+	return av;
+}
+
+/*
+ * Convert a file to an access vector and include the correct
+ * open permission.
+ */
+static inline u32 open_file_to_av(struct file *file)
+{
+	u32 av = file_to_av(file);
+
+	if (selinux_policycap_openperm)
+		av |= FILE__OPEN;
+
+	return av;
+}
+
+/* Hook functions begin here. */
+
+static int selinux_binder_set_context_mgr(struct task_struct *mgr)
+{
+	u32 mysid = current_sid();
+	u32 mgrsid = task_sid(mgr);
+
+	return avc_has_perm(mysid, mgrsid, SECCLASS_BINDER,
+			    BINDER__SET_CONTEXT_MGR, NULL);
+}
+
+static int selinux_binder_transaction(struct task_struct *from,
+				      struct task_struct *to)
+{
+	u32 mysid = current_sid();
+	u32 fromsid = task_sid(from);
+	u32 tosid = task_sid(to);
+	int rc;
+
+	if (mysid != fromsid) {
+		rc = avc_has_perm(mysid, fromsid, SECCLASS_BINDER,
+				  BINDER__IMPERSONATE, NULL);
+		if (rc)
+			return rc;
+	}
+
+	return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__CALL,
+			    NULL);
+}
+
+static int selinux_binder_transfer_binder(struct task_struct *from,
+					  struct task_struct *to)
+{
+	u32 fromsid = task_sid(from);
+	u32 tosid = task_sid(to);
+
+	return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER,
+			    NULL);
+}
+
+static int selinux_binder_transfer_file(struct task_struct *from,
+					struct task_struct *to,
+					struct file *file)
+{
+	u32 sid = task_sid(to);
+	struct file_security_struct *fsec = file->f_security;
+	struct inode *inode = d_backing_inode(file->f_path.dentry);
+	struct inode_security_struct *isec = inode->i_security;
+	struct common_audit_data ad;
+	int rc;
+
+	ad.type = LSM_AUDIT_DATA_PATH;
+	ad.u.path = file->f_path;
+
+	if (sid != fsec->sid) {
+		rc = avc_has_perm(sid, fsec->sid,
+				  SECCLASS_FD,
+				  FD__USE,
+				  &ad);
+		if (rc)
+			return rc;
+	}
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	return avc_has_perm(sid, isec->sid, isec->sclass, file_to_av(file),
+			    &ad);
+}
+
+static int selinux_ptrace_access_check(struct task_struct *child,
+				     unsigned int mode)
+{
+	if (mode & PTRACE_MODE_READ) {
+		u32 sid = current_sid();
+		u32 csid = task_sid(child);
+		return avc_has_perm(sid, csid, SECCLASS_FILE, FILE__READ, NULL);
+	}
+
+	return current_has_perm(child, PROCESS__PTRACE);
+}
+
+static int selinux_ptrace_traceme(struct task_struct *parent)
+{
+	return task_has_perm(parent, current, PROCESS__PTRACE);
+}
+
+static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
+			  kernel_cap_t *inheritable, kernel_cap_t *permitted)
+{
+	return current_has_perm(target, PROCESS__GETCAP);
+}
+
+static int selinux_capset(struct cred *new, const struct cred *old,
+			  const kernel_cap_t *effective,
+			  const kernel_cap_t *inheritable,
+			  const kernel_cap_t *permitted)
+{
+	return cred_has_perm(old, new, PROCESS__SETCAP);
+}
+
+/*
+ * (This comment used to live with the selinux_task_setuid hook,
+ * which was removed).
+ *
+ * Since setuid only affects the current process, and since the SELinux
+ * controls are not based on the Linux identity attributes, SELinux does not
+ * need to control this operation.  However, SELinux does control the use of
+ * the CAP_SETUID and CAP_SETGID capabilities using the capable hook.
+ */
+
+static int selinux_capable(const struct cred *cred, struct user_namespace *ns,
+			   int cap, int audit)
+{
+	return cred_has_capability(cred, cap, audit);
+}
+
+static int selinux_quotactl(int cmds, int type, int id, struct super_block *sb)
+{
+	const struct cred *cred = current_cred();
+	int rc = 0;
+
+	if (!sb)
+		return 0;
+
+	switch (cmds) {
+	case Q_SYNC:
+	case Q_QUOTAON:
+	case Q_QUOTAOFF:
+	case Q_SETINFO:
+	case Q_SETQUOTA:
+		rc = superblock_has_perm(cred, sb, FILESYSTEM__QUOTAMOD, NULL);
+		break;
+	case Q_GETFMT:
+	case Q_GETINFO:
+	case Q_GETQUOTA:
+		rc = superblock_has_perm(cred, sb, FILESYSTEM__QUOTAGET, NULL);
+		break;
+	default:
+		rc = 0;  /* let the kernel handle invalid cmds */
+		break;
+	}
+	return rc;
+}
+
+static int selinux_quota_on(struct dentry *dentry)
+{
+	const struct cred *cred = current_cred();
+
+	return dentry_has_perm(cred, dentry, FILE__QUOTAON);
+}
+
+static int selinux_syslog(int type)
+{
+	int rc;
+
+	switch (type) {
+	case SYSLOG_ACTION_READ_ALL:	/* Read last kernel messages */
+	case SYSLOG_ACTION_SIZE_BUFFER:	/* Return size of the log buffer */
+		rc = task_has_system(current, SYSTEM__SYSLOG_READ);
+		break;
+	case SYSLOG_ACTION_CONSOLE_OFF:	/* Disable logging to console */
+	case SYSLOG_ACTION_CONSOLE_ON:	/* Enable logging to console */
+	/* Set level of messages printed to console */
+	case SYSLOG_ACTION_CONSOLE_LEVEL:
+		rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
+		break;
+	case SYSLOG_ACTION_CLOSE:	/* Close log */
+	case SYSLOG_ACTION_OPEN:	/* Open log */
+	case SYSLOG_ACTION_READ:	/* Read from log */
+	case SYSLOG_ACTION_READ_CLEAR:	/* Read/clear last kernel messages */
+	case SYSLOG_ACTION_CLEAR:	/* Clear ring buffer */
+	default:
+		rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Called when checking that a process has enough memory to allocate a
+ * new virtual mapping.  The value returned here is not an errno: it
+ * indicates whether the task should be treated as privileged
+ * (cap_sys_admin) by __vm_enough_memory(), which makes the final
+ * overcommit decision.
+ *
+ * Do not audit the selinux permission check, as this is applied to all
+ * processes that allocate mappings.
+ */
+static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
+{
+	int rc, cap_sys_admin = 0;
+
+	rc = cred_has_capability(current_cred(), CAP_SYS_ADMIN,
+					SECURITY_CAP_NOAUDIT);
+	if (rc == 0)
+		cap_sys_admin = 1;
+
+	return cap_sys_admin;
+}
+
+/* binprm security operations */
+
+static int check_nnp_nosuid(const struct linux_binprm *bprm,
+			    const struct task_security_struct *old_tsec,
+			    const struct task_security_struct *new_tsec)
+{
+	int nnp = (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS);
+	int nosuid = (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID);
+	int rc;
+
+	if (!nnp && !nosuid)
+		return 0; /* neither NNP nor nosuid */
+
+	if (new_tsec->sid == old_tsec->sid)
+		return 0; /* No change in credentials */
+
+	/*
+	 * The only transitions we permit under NNP or nosuid
+	 * are transitions to bounded SIDs, i.e. SIDs that are
+	 * guaranteed to only be allowed a subset of the permissions
+	 * of the current SID.
+	 */
+	rc = security_bounded_transition(old_tsec->sid, new_tsec->sid);
+	if (rc) {
+		/*
+		 * On failure, preserve the errno values for NNP vs nosuid.
+		 * NNP:  Operation not permitted for caller.
+		 * nosuid:  Permission denied to file.
+		 */
+		if (nnp)
+			return -EPERM;
+		else
+			return -EACCES;
+	}
+	return 0;
+}
+
+static int selinux_bprm_set_creds(struct linux_binprm *bprm)
+{
+	const struct task_security_struct *old_tsec;
+	struct task_security_struct *new_tsec;
+	struct inode_security_struct *isec;
+	struct common_audit_data ad;
+	struct inode *inode = file_inode(bprm->file);
+	int rc;
+
+	/* SELinux context only depends on initial program or script and not
+	 * the script interpreter */
+	if (bprm->cred_prepared)
+		return 0;
+
+	old_tsec = current_security();
+	new_tsec = bprm->cred->security;
+	isec = inode->i_security;
+
+	/* Default to the current task SID. */
+	new_tsec->sid = old_tsec->sid;
+	new_tsec->osid = old_tsec->sid;
+
+	/* Reset fs, key, and sock SIDs on execve. */
+	new_tsec->create_sid = 0;
+	new_tsec->keycreate_sid = 0;
+	new_tsec->sockcreate_sid = 0;
+
+	if (old_tsec->exec_sid) {
+		new_tsec->sid = old_tsec->exec_sid;
+		/* Reset exec SID on execve. */
+		new_tsec->exec_sid = 0;
+
+		/* Fail on NNP or nosuid if not an allowed transition. */
+		rc = check_nnp_nosuid(bprm, old_tsec, new_tsec);
+		if (rc)
+			return rc;
+	} else {
+		/* Check for a default transition on this program. */
+		rc = security_transition_sid(old_tsec->sid, isec->sid,
+					     SECCLASS_PROCESS, NULL,
+					     &new_tsec->sid);
+		if (rc)
+			return rc;
+
+		/*
+		 * Fallback to old SID on NNP or nosuid if not an allowed
+		 * transition.
+		 */
+		rc = check_nnp_nosuid(bprm, old_tsec, new_tsec);
+		if (rc)
+			new_tsec->sid = old_tsec->sid;
+	}
+
+	ad.type = LSM_AUDIT_DATA_PATH;
+	ad.u.path = bprm->file->f_path;
+
+	if (new_tsec->sid == old_tsec->sid) {
+		rc = avc_has_perm(old_tsec->sid, isec->sid,
+				  SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad);
+		if (rc)
+			return rc;
+	} else {
+		/* Check permissions for the transition. */
+		rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+				  SECCLASS_PROCESS, PROCESS__TRANSITION, &ad);
+		if (rc)
+			return rc;
+
+		rc = avc_has_perm(new_tsec->sid, isec->sid,
+				  SECCLASS_FILE, FILE__ENTRYPOINT, &ad);
+		if (rc)
+			return rc;
+
+		/* Check for shared state */
+		if (bprm->unsafe & LSM_UNSAFE_SHARE) {
+			rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+					  SECCLASS_PROCESS, PROCESS__SHARE,
+					  NULL);
+			if (rc)
+				return -EPERM;
+		}
+
+		/* Make sure that anyone attempting to ptrace over a task that
+		 * changes its SID has the appropriate permission. */
+		if (bprm->unsafe &
+		    (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
+			struct task_struct *tracer;
+			struct task_security_struct *sec;
+			u32 ptsid = 0;
+
+			rcu_read_lock();
+			tracer = ptrace_parent(current);
+			if (likely(tracer != NULL)) {
+				sec = __task_cred(tracer)->security;
+				ptsid = sec->sid;
+			}
+			rcu_read_unlock();
+
+			if (ptsid != 0) {
+				rc = avc_has_perm(ptsid, new_tsec->sid,
+						  SECCLASS_PROCESS,
+						  PROCESS__PTRACE, NULL);
+				if (rc)
+					return -EPERM;
+			}
+		}
+
+		/* Clear any possibly unsafe personality bits on exec: */
+		bprm->per_clear |= PER_CLEAR_ON_SETID;
+	}
+
+	return 0;
+}
+
+static int selinux_bprm_secureexec(struct linux_binprm *bprm)
+{
+	const struct task_security_struct *tsec = current_security();
+	u32 sid, osid;
+	int atsecure = 0;
+
+	sid = tsec->sid;
+	osid = tsec->osid;
+
+	if (osid != sid) {
+		/* Enable secure mode for SIDs transitions unless
+		   the noatsecure permission is granted between
+		   the two SIDs, i.e. avc_has_perm() returns 0. */
+		atsecure = avc_has_perm(osid, sid,
+					SECCLASS_PROCESS,
+					PROCESS__NOATSECURE, NULL);
+	}
+
+	return !!atsecure;
+}
+
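+/*
+ * Returns fd + 1 when the creds may no longer access the file, else 0.
+ * The +1 lets iterate_fd() distinguish a match on descriptor 0 from
+ * "no match"; flush_unauthorized_files() subtracts it again below.
+ */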
+static int match_file(const void *p, struct file *file, unsigned fd)
+{
+	return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0;
+}
+
+/* Derived from fs/exec.c:flush_old_files. */
+static inline void flush_unauthorized_files(const struct cred *cred,
+					    struct files_struct *files)
+{
+	struct file *file, *devnull = NULL;
+	struct tty_struct *tty;
+	int drop_tty = 0;
+	unsigned n;
+
+	tty = get_current_tty();
+	if (tty) {
+		spin_lock(&tty_files_lock);
+		if (!list_empty(&tty->tty_files)) {
+			struct tty_file_private *file_priv;
+
+			/* Revalidate access to controlling tty.
+			   Use file_path_has_perm on the tty path directly
+			   rather than using file_has_perm, as this particular
+			   open file may belong to another process and we are
+			   only interested in the inode-based check here. */
+			file_priv = list_first_entry(&tty->tty_files,
+						struct tty_file_private, list);
+			file = file_priv->file;
+			if (file_path_has_perm(cred, file, FILE__READ | FILE__WRITE))
+				drop_tty = 1;
+		}
+		spin_unlock(&tty_files_lock);
+		tty_kref_put(tty);
+	}
+	/* Reset controlling tty. */
+	if (drop_tty)
+		no_tty();
+
+	/* Revalidate access to inherited open files. */
+	n = iterate_fd(files, 0, match_file, cred);
+	if (!n) /* none found? */
+		return;
+
+	devnull = dentry_open(&selinux_null, O_RDWR, cred);
+	if (IS_ERR(devnull))
+		devnull = NULL;
+	/* replace all the matching ones with this */
+	do {
+		replace_fd(n - 1, devnull, 0);
+	} while ((n = iterate_fd(files, n, match_file, cred)) != 0);
+	if (devnull)
+		fput(devnull);
+}
+
+/*
+ * Prepare a process for imminent new credential changes due to exec
+ */
+static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
+{
+	struct task_security_struct *new_tsec;
+	struct rlimit *rlim, *initrlim;
+	int rc, i;
+
+	new_tsec = bprm->cred->security;
+	if (new_tsec->sid == new_tsec->osid)
+		return;
+
+	/* Close files for which the new task SID is not authorized. */
+	flush_unauthorized_files(bprm->cred, current->files);
+
+	/* Always clear parent death signal on SID transitions. */
+	current->pdeath_signal = 0;
+
+	/* Check whether the new SID can inherit resource limits from the old
+	 * SID.  If not, reset all soft limits to the lower of the current
+	 * task's hard limit and the init task's soft limit.
+	 *
+	 * Note that the setting of hard limits (even to lower them) can be
+	 * controlled by the setrlimit check.  The inclusion of the init task's
+	 * soft limit into the computation is to avoid resetting soft limits
+	 * higher than the default soft limit for cases where the default is
+	 * lower than the hard limit, e.g. RLIMIT_CORE or RLIMIT_STACK.
+	 */
+	rc = avc_has_perm(new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS,
+			  PROCESS__RLIMITINH, NULL);
+	if (rc) {
+		/* protect against do_prlimit() */
+		task_lock(current);
+		for (i = 0; i < RLIM_NLIMITS; i++) {
+			rlim = current->signal->rlim + i;
+			initrlim = init_task.signal->rlim + i;
+			rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
+		}
+		task_unlock(current);
+		update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
+	}
+}
+
+/*
+ * Clean up the process immediately after the installation of new credentials
+ * due to exec
+ */
+static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
+{
+	const struct task_security_struct *tsec = current_security();
+	struct itimerval itimer;
+	u32 osid, sid;
+	int rc, i;
+
+	osid = tsec->osid;
+	sid = tsec->sid;
+
+	if (sid == osid)
+		return;
+
+	/* Check whether the new SID can inherit signal state from the old SID.
+	 * If not, clear itimers to avoid subsequent signal generation and
+	 * flush and unblock signals.
+	 *
+	 * This must occur _after_ the task SID has been updated so that any
+	 * kill done after the flush will be checked against the new SID.
+	 */
+	rc = avc_has_perm(osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
+	if (rc) {
+		memset(&itimer, 0, sizeof itimer);
+		for (i = 0; i < 3; i++)
+			do_setitimer(i, &itimer, NULL);
+		spin_lock_irq(&current->sighand->siglock);
+		if (!fatal_signal_pending(current)) {
+			flush_sigqueue(&current->pending);
+			flush_sigqueue(&current->signal->shared_pending);
+			flush_signal_handlers(current, 1);
+			sigemptyset(&current->blocked);
+			recalc_sigpending();
+		}
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+
+	/* Wake up the parent if it is waiting so that it can recheck
+	 * wait permission to the new task SID. */
+	read_lock(&tasklist_lock);
+	__wake_up_parent(current, current->real_parent);
+	read_unlock(&tasklist_lock);
+}
+
+/* superblock security operations */
+
+static int selinux_sb_alloc_security(struct super_block *sb)
+{
+	return superblock_alloc_security(sb);
+}
+
+static void selinux_sb_free_security(struct super_block *sb)
+{
+	superblock_free_security(sb);
+}
+
+static inline int match_prefix(char *prefix, int plen, char *option, int olen)
+{
+	if (plen > olen)
+		return 0;
+
+	return !memcmp(prefix, option, plen);
+}
+
+static inline int selinux_option(char *option, int len)
+{
+	return (match_prefix(CONTEXT_STR, sizeof(CONTEXT_STR)-1, option, len) ||
+		match_prefix(FSCONTEXT_STR, sizeof(FSCONTEXT_STR)-1, option, len) ||
+		match_prefix(DEFCONTEXT_STR, sizeof(DEFCONTEXT_STR)-1, option, len) ||
+		match_prefix(ROOTCONTEXT_STR, sizeof(ROOTCONTEXT_STR)-1, option, len) ||
+		match_prefix(LABELSUPP_STR, sizeof(LABELSUPP_STR)-1, option, len));
+}
+
+static inline void take_option(char **to, char *from, int *first, int len)
+{
+	if (!*first) {
+		**to = ',';
+		*to += 1;
+	} else
+		*first = 0;
+	memcpy(*to, from, len);
+	*to += len;
+}
+
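+/*
+ * Append one SELinux option to the security data string, stripping
+ * any embedded '"' quotes and separating successive options with '|',
+ * the separator that selinux_parse_opts_str() later splits on.
+ */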
+static inline void take_selinux_option(char **to, char *from, int *first,
+				       int len)
+{
+	int current_size = 0;
+
+	if (!*first) {
+		**to = '|';
+		*to += 1;
+	} else
+		*first = 0;
+
+	while (current_size < len) {
+		if (*from != '"') {
+			**to = *from;
+			*to += 1;
+		}
+		from += 1;
+		current_size += 1;
+	}
+}
+
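+/*
+ * Split the ','-separated mount data in @orig: the SELinux options
+ * are copied into @copy ('|'-separated, quotes stripped) while @orig
+ * is rewritten in place to hold only the options destined for the
+ * filesystem itself.  Commas inside double quotes are not treated as
+ * separators.
+ */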
+static int selinux_sb_copy_data(char *orig, char *copy)
+{
+	int fnosec, fsec, rc = 0;
+	char *in_save, *in_curr, *in_end;
+	char *sec_curr, *nosec_save, *nosec;
+	int open_quote = 0;
+
+	in_curr = orig;
+	sec_curr = copy;
+
+	nosec = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!nosec) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	nosec_save = nosec;
+	fnosec = fsec = 1;
+	in_save = in_end = orig;
+
+	do {
+		if (*in_end == '"')
+			open_quote = !open_quote;
+		if ((*in_end == ',' && open_quote == 0) ||
+				*in_end == '\0') {
+			int len = in_end - in_curr;
+
+			if (selinux_option(in_curr, len))
+				take_selinux_option(&sec_curr, in_curr, &fsec, len);
+			else
+				take_option(&nosec, in_curr, &fnosec, len);
+
+			in_curr = in_end + 1;
+		}
+	} while (*in_end++);
+
+	strcpy(in_save, nosec_save);
+	free_page((unsigned long)nosec_save);
+out:
+	return rc;
+}
+
+static int selinux_sb_remount(struct super_block *sb, void *data)
+{
+	int rc, i, *flags;
+	struct security_mnt_opts opts;
+	char *secdata, **mount_options;
+	struct superblock_security_struct *sbsec = sb->s_security;
+
+	if (!(sbsec->flags & SE_SBINITIALIZED))
+		return 0;
+
+	if (!data)
+		return 0;
+
+	if (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)
+		return 0;
+
+	security_init_mnt_opts(&opts);
+	secdata = alloc_secdata();
+	if (!secdata)
+		return -ENOMEM;
+	rc = selinux_sb_copy_data(data, secdata);
+	if (rc)
+		goto out_free_secdata;
+
+	rc = selinux_parse_opts_str(secdata, &opts);
+	if (rc)
+		goto out_free_secdata;
+
+	mount_options = opts.mnt_opts;
+	flags = opts.mnt_opts_flags;
+
+	for (i = 0; i < opts.num_mnt_opts; i++) {
+		u32 sid;
+
+		if (flags[i] == SBLABEL_MNT)
+			continue;
+		rc = security_context_str_to_sid(mount_options[i], &sid, GFP_KERNEL);
+		if (rc) {
+			printk(KERN_WARNING "SELinux: security_context_str_to_sid"
+			       "(%s) failed for (dev %s, type %s) errno=%d\n",
+			       mount_options[i], sb->s_id, sb->s_type->name, rc);
+			goto out_free_opts;
+		}
+		rc = -EINVAL;
+		switch (flags[i]) {
+		case FSCONTEXT_MNT:
+			if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
+				goto out_bad_option;
+			break;
+		case CONTEXT_MNT:
+			if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
+				goto out_bad_option;
+			break;
+		case ROOTCONTEXT_MNT: {
+			struct inode_security_struct *root_isec;
+			root_isec = d_backing_inode(sb->s_root)->i_security;
+
+			if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
+				goto out_bad_option;
+			break;
+		}
+		case DEFCONTEXT_MNT:
+			if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
+				goto out_bad_option;
+			break;
+		default:
+			goto out_free_opts;
+		}
+	}
+
+	rc = 0;
+out_free_opts:
+	security_free_mnt_opts(&opts);
+out_free_secdata:
+	free_secdata(secdata);
+	return rc;
+out_bad_option:
+	printk(KERN_WARNING "SELinux: unable to change security options "
+	       "during remount (dev %s, type=%s)\n", sb->s_id,
+	       sb->s_type->name);
+	goto out_free_opts;
+}
+
+static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
+{
+	const struct cred *cred = current_cred();
+	struct common_audit_data ad;
+	int rc;
+
+	rc = superblock_doinit(sb, data);
+	if (rc)
+		return rc;
+
+	/* Allow all mounts performed by the kernel */
+	if (flags & MS_KERNMOUNT)
+		return 0;
+
+	ad.type = LSM_AUDIT_DATA_DENTRY;
+	ad.u.dentry = sb->s_root;
+	return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
+}
+
+static int selinux_sb_statfs(struct dentry *dentry)
+{
+	const struct cred *cred = current_cred();
+	struct common_audit_data ad;
+
+	ad.type = LSM_AUDIT_DATA_DENTRY;
+	ad.u.dentry = dentry->d_sb->s_root;
+	return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
+}
+
+static int selinux_mount(const char *dev_name,
+			 struct path *path,
+			 const char *type,
+			 unsigned long flags,
+			 void *data)
+{
+	const struct cred *cred = current_cred();
+
+	if (flags & MS_REMOUNT)
+		return superblock_has_perm(cred, path->dentry->d_sb,
+					   FILESYSTEM__REMOUNT, NULL);
+	else
+		return path_has_perm(cred, path, FILE__MOUNTON);
+}
+
+static int selinux_umount(struct vfsmount *mnt, int flags)
+{
+	const struct cred *cred = current_cred();
+
+	return superblock_has_perm(cred, mnt->mnt_sb,
+				   FILESYSTEM__UNMOUNT, NULL);
+}
+
+/* inode security operations */
+
+static int selinux_inode_alloc_security(struct inode *inode)
+{
+	return inode_alloc_security(inode);
+}
+
+static void selinux_inode_free_security(struct inode *inode)
+{
+	inode_free_security(inode);
+}
+
+static int selinux_dentry_init_security(struct dentry *dentry, int mode,
+					struct qstr *name, void **ctx,
+					u32 *ctxlen)
+{
+	u32 newsid;
+	int rc;
+
+	rc = selinux_determine_inode_label(d_inode(dentry->d_parent), name,
+					   inode_mode_to_security_class(mode),
+					   &newsid);
+	if (rc)
+		return rc;
+
+	return security_sid_to_context(newsid, (char **)ctx, ctxlen);
+}
+
+static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
+				       const struct qstr *qstr,
+				       const char **name,
+				       void **value, size_t *len)
+{
+	const struct task_security_struct *tsec = current_security();
+	struct inode_security_struct *dsec;
+	struct superblock_security_struct *sbsec;
+	u32 sid, newsid, clen;
+	int rc;
+	char *context;
+
+	dsec = dir->i_security;
+	sbsec = dir->i_sb->s_security;
+
+	sid = tsec->sid;
+	newsid = tsec->create_sid;
+
+	rc = selinux_determine_inode_label(
+		dir, qstr,
+		inode_mode_to_security_class(inode->i_mode),
+		&newsid);
+	if (rc)
+		return rc;
+
+	/* Possibly defer initialization to selinux_complete_init. */
+	if (sbsec->flags & SE_SBINITIALIZED) {
+		struct inode_security_struct *isec = inode->i_security;
+		isec->sclass = inode_mode_to_security_class(inode->i_mode);
+		isec->sid = newsid;
+		isec->initialized = 1;
+	}
+
+	if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
+		return -EOPNOTSUPP;
+
+	if (name)
+		*name = XATTR_SELINUX_SUFFIX;
+
+	if (value && len) {
+		rc = security_sid_to_context_force(newsid, &context, &clen);
+		if (rc)
+			return rc;
+		*value = context;
+		*len = clen;
+	}
+
+	return 0;
+}
+
+static int selinux_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	return may_create(dir, dentry, SECCLASS_FILE);
+}
+
+static int selinux_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
+{
+	return may_link(dir, old_dentry, MAY_LINK);
+}
+
+static int selinux_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	return may_link(dir, dentry, MAY_UNLINK);
+}
+
+static int selinux_inode_symlink(struct inode *dir, struct dentry *dentry, const char *name)
+{
+	return may_create(dir, dentry, SECCLASS_LNK_FILE);
+}
+
+static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mask)
+{
+	return may_create(dir, dentry, SECCLASS_DIR);
+}
+
+static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	return may_link(dir, dentry, MAY_RMDIR);
+}
+
+static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+{
+	return may_create(dir, dentry, inode_mode_to_security_class(mode));
+}
+
+static int selinux_inode_rename(struct inode *old_inode, struct dentry *old_dentry,
+				struct inode *new_inode, struct dentry *new_dentry)
+{
+	return may_rename(old_inode, old_dentry, new_inode, new_dentry);
+}
+
+static int selinux_inode_readlink(struct dentry *dentry)
+{
+	const struct cred *cred = current_cred();
+
+	return dentry_has_perm(cred, dentry, FILE__READ);
+}
+
+static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
+				     bool rcu)
+{
+	const struct cred *cred = current_cred();
+	struct common_audit_data ad;
+	struct inode_security_struct *isec;
+	u32 sid;
+
+	validate_creds(cred);
+
+	ad.type = LSM_AUDIT_DATA_DENTRY;
+	ad.u.dentry = dentry;
+	sid = cred_sid(cred);
+	isec = inode->i_security;
+
+	return avc_has_perm_flags(sid, isec->sid, isec->sclass, FILE__READ, &ad,
+				  rcu ? MAY_NOT_BLOCK : 0);
+}
+
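+/*
+ * Slow path for emitting an audit record for an inode permission
+ * check; kept out of line (noinline) so the common, non-audited case
+ * in selinux_inode_permission() stays fast.
+ */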
+static noinline int audit_inode_permission(struct inode *inode,
+					   u32 perms, u32 audited, u32 denied,
+					   int result,
+					   unsigned flags)
+{
+	struct common_audit_data ad;
+	struct inode_security_struct *isec = inode->i_security;
+	int rc;
+
+	ad.type = LSM_AUDIT_DATA_INODE;
+	ad.u.inode = inode;
+
+	rc = slow_avc_audit(current_sid(), isec->sid, isec->sclass, perms,
+			    audited, denied, result, &ad, flags);
+	if (rc)
+		return rc;
+	return 0;
+}
+
+static int selinux_inode_permission(struct inode *inode, int mask)
+{
+	const struct cred *cred = current_cred();
+	u32 perms;
+	bool from_access;
+	unsigned flags = mask & MAY_NOT_BLOCK;
+	struct inode_security_struct *isec;
+	u32 sid;
+	struct av_decision avd;
+	int rc, rc2;
+	u32 audited, denied;
+
+	from_access = mask & MAY_ACCESS;
+	mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
+
+	/* No permission to check.  Existence test. */
+	if (!mask)
+		return 0;
+
+	validate_creds(cred);
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	perms = file_mask_to_av(inode->i_mode, mask);
+
+	sid = cred_sid(cred);
+	isec = inode->i_security;
+
+	rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd);
+	audited = avc_audit_required(perms, &avd, rc,
+				     from_access ? FILE__AUDIT_ACCESS : 0,
+				     &denied);
+	if (likely(!audited))
+		return rc;
+
+	rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
+	if (rc2)
+		return rc2;
+	return rc;
+}
+
+static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	const struct cred *cred = current_cred();
+	unsigned int ia_valid = iattr->ia_valid;
+	__u32 av = FILE__WRITE;
+
+	/* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */
+	if (ia_valid & ATTR_FORCE) {
+		ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_MODE |
+			      ATTR_FORCE);
+		if (!ia_valid)
+			return 0;
+	}
+
+	if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
+			ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
+		return dentry_has_perm(cred, dentry, FILE__SETATTR);
+
+	if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)
+			&& !(ia_valid & ATTR_FILE))
+		av |= FILE__OPEN;
+
+	return dentry_has_perm(cred, dentry, av);
+}
+
+static int selinux_inode_getattr(const struct path *path)
+{
+	return path_has_perm(current_cred(), path, FILE__GETATTR);
+}
+
+static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
+{
+	const struct cred *cred = current_cred();
+
+	if (!strncmp(name, XATTR_SECURITY_PREFIX,
+		     sizeof XATTR_SECURITY_PREFIX - 1)) {
+		if (!strcmp(name, XATTR_NAME_CAPS)) {
+			if (!capable(CAP_SETFCAP))
+				return -EPERM;
+		} else if (!capable(CAP_SYS_ADMIN)) {
+			/* A different attribute in the security namespace.
+			   Restrict to administrator. */
+			return -EPERM;
+		}
+	}
+
+	/* Not an attribute we recognize, so just check the
+	   ordinary setattr permission. */
+	return dentry_has_perm(cred, dentry, FILE__SETATTR);
+}
+
+static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
+				  const void *value, size_t size, int flags)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	struct inode_security_struct *isec = inode->i_security;
+	struct superblock_security_struct *sbsec;
+	struct common_audit_data ad;
+	u32 newsid, sid = current_sid();
+	int rc = 0;
+
+	if (strcmp(name, XATTR_NAME_SELINUX))
+		return selinux_inode_setotherxattr(dentry, name);
+
+	sbsec = inode->i_sb->s_security;
+	if (!(sbsec->flags & SBLABEL_MNT))
+		return -EOPNOTSUPP;
+
+	if (!inode_owner_or_capable(inode))
+		return -EPERM;
+
+	ad.type = LSM_AUDIT_DATA_DENTRY;
+	ad.u.dentry = dentry;
+
+	rc = avc_has_perm(sid, isec->sid, isec->sclass,
+			  FILE__RELABELFROM, &ad);
+	if (rc)
+		return rc;
+
+	rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
+	if (rc == -EINVAL) {
+		if (!capable(CAP_MAC_ADMIN)) {
+			struct audit_buffer *ab;
+			size_t audit_size;
+			const char *str;
+
+			/* We strip a nul only if it is at the end, otherwise the
+			 * context contains a nul and we should audit that */
+			if (value) {
+				str = value;
+				if (str[size - 1] == '\0')
+					audit_size = size - 1;
+				else
+					audit_size = size;
+			} else {
+				str = "";
+				audit_size = 0;
+			}
+			ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
+			audit_log_format(ab, "op=setxattr invalid_context=");
+			audit_log_n_untrustedstring(ab, value, audit_size);
+			audit_log_end(ab);
+
+			return rc;
+		}
+		rc = security_context_to_sid_force(value, size, &newsid);
+	}
+	if (rc)
+		return rc;
+
+	rc = avc_has_perm(sid, newsid, isec->sclass,
+			  FILE__RELABELTO, &ad);
+	if (rc)
+		return rc;
+
+	rc = security_validate_transition(isec->sid, newsid, sid,
+					  isec->sclass);
+	if (rc)
+		return rc;
+
+	return avc_has_perm(newsid,
+			    sbsec->sid,
+			    SECCLASS_FILESYSTEM,
+			    FILESYSTEM__ASSOCIATE,
+			    &ad);
+}
+
+static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
+					const void *value, size_t size,
+					int flags)
+{
+	struct inode *inode = d_backing_inode(dentry);
+	struct inode_security_struct *isec = inode->i_security;
+	u32 newsid;
+	int rc;
+
+	if (strcmp(name, XATTR_NAME_SELINUX)) {
+		/* Not an attribute we recognize, so nothing to do. */
+		return;
+	}
+
+	rc = security_context_to_sid_force(value, size, &newsid);
+	if (rc) {
+		printk(KERN_ERR "SELinux:  unable to map context to SID "
+		       "for (%s, %lu), rc=%d\n",
+		       inode->i_sb->s_id, inode->i_ino, -rc);
+		return;
+	}
+
+	isec->sclass = inode_mode_to_security_class(inode->i_mode);
+	isec->sid = newsid;
+	isec->initialized = 1;
+}
+
+static int selinux_inode_getxattr(struct dentry *dentry, const char *name)
+{
+	const struct cred *cred = current_cred();
+
+	return dentry_has_perm(cred, dentry, FILE__GETATTR);
+}
+
+static int selinux_inode_listxattr(struct dentry *dentry)
+{
+	const struct cred *cred = current_cred();
+
+	return dentry_has_perm(cred, dentry, FILE__GETATTR);
+}
+
+static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
+{
+	if (strcmp(name, XATTR_NAME_SELINUX))
+		return selinux_inode_setotherxattr(dentry, name);
+
+	/* No one is allowed to remove a SELinux security label.
+	   You can change the label, but all data must be labeled. */
+	return -EACCES;
+}
+
+/*
+ * Copy the inode security context value to the user.
+ *
+ * Permission check is handled by selinux_inode_getxattr hook.
+ */
+static int selinux_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
+{
+	u32 size;
+	int error;
+	char *context = NULL;
+	struct inode_security_struct *isec = inode->i_security;
+
+	if (strcmp(name, XATTR_SELINUX_SUFFIX))
+		return -EOPNOTSUPP;
+
+	/*
+	 * If the caller has CAP_MAC_ADMIN, then get the raw context
+	 * value even if it is not defined by current policy; otherwise,
+	 * use the in-core value under current policy.
+	 * Use the non-auditing forms of the permission checks since
+	 * getxattr may be called by unprivileged processes commonly
+	 * and lack of permission just means that we fall back to the
+	 * in-core context value, not a denial.
+	 */
+	error = cap_capable(current_cred(), &init_user_ns, CAP_MAC_ADMIN,
+			    SECURITY_CAP_NOAUDIT);
+	if (!error)
+		error = cred_has_capability(current_cred(), CAP_MAC_ADMIN,
+					    SECURITY_CAP_NOAUDIT);
+	if (!error)
+		error = security_sid_to_context_force(isec->sid, &context,
+						      &size);
+	else
+		error = security_sid_to_context(isec->sid, &context, &size);
+	if (error)
+		return error;
+	error = size;
+	if (alloc) {
+		*buffer = context;
+		goto out_nofree;
+	}
+	kfree(context);
+out_nofree:
+	return error;
+}
+
+static int selinux_inode_setsecurity(struct inode *inode, const char *name,
+				     const void *value, size_t size, int flags)
+{
+	struct inode_security_struct *isec = inode->i_security;
+	u32 newsid;
+	int rc;
+
+	if (strcmp(name, XATTR_SELINUX_SUFFIX))
+		return -EOPNOTSUPP;
+
+	if (!value || !size)
+		return -EACCES;
+
+	rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	isec->sclass = inode_mode_to_security_class(inode->i_mode);
+	isec->sid = newsid;
+	isec->initialized = 1;
+	return 0;
+}
+
+static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
+{
+	const int len = sizeof(XATTR_NAME_SELINUX);
+	if (buffer && len <= buffer_size)
+		memcpy(buffer, XATTR_NAME_SELINUX, len);
+	return len;
+}
+
+static void selinux_inode_getsecid(const struct inode *inode, u32 *secid)
+{
+	struct inode_security_struct *isec = inode->i_security;
+	*secid = isec->sid;
+}
+
+/* file security operations */
+
+static int selinux_revalidate_file_permission(struct file *file, int mask)
+{
+	const struct cred *cred = current_cred();
+	struct inode *inode = file_inode(file);
+
+	/* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
+	if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
+		mask |= MAY_APPEND;
+
+	return file_has_perm(cred, file,
+			     file_mask_to_av(inode->i_mode, mask));
+}
+
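+/*
+ * Fast path for per-operation file checks: revalidation is skipped
+ * when the task SID, the inode label and the policy sequence number
+ * are all unchanged since the file_open check.
+ */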
+static int selinux_file_permission(struct file *file, int mask)
+{
+	struct inode *inode = file_inode(file);
+	struct file_security_struct *fsec = file->f_security;
+	struct inode_security_struct *isec = inode->i_security;
+	u32 sid = current_sid();
+
+	if (!mask)
+		/* No permission to check.  Existence test. */
+		return 0;
+
+	if (sid == fsec->sid && fsec->isid == isec->sid &&
+	    fsec->pseqno == avc_policy_seqno())
+		/* No change since file_open check. */
+		return 0;
+
+	return selinux_revalidate_file_permission(file, mask);
+}
+
+static int selinux_file_alloc_security(struct file *file)
+{
+	return file_alloc_security(file);
+}
+
+static void selinux_file_free_security(struct file *file)
+{
+	file_free_security(file);
+}
+
+/*
+ * Check whether a task has the ioctl permission on the file and the
+ * extended permission for the specific cmd; the 16-bit command value
+ * is split into a driver byte (upper 8 bits) and an extended
+ * permission byte (lower 8 bits) for the policy lookup.
+ */
+static int ioctl_has_perm(const struct cred *cred, struct file *file,
+		u32 requested, u16 cmd)
+{
+	struct common_audit_data ad;
+	struct file_security_struct *fsec = file->f_security;
+	struct inode *inode = file_inode(file);
+	struct inode_security_struct *isec = inode->i_security;
+	struct lsm_ioctlop_audit ioctl;
+	u32 ssid = cred_sid(cred);
+	int rc;
+	u8 driver = cmd >> 8;
+	u8 xperm = cmd & 0xff;
+
+	ad.type = LSM_AUDIT_DATA_IOCTL_OP;
+	ad.u.op = &ioctl;
+	ad.u.op->cmd = cmd;
+	ad.u.op->path = file->f_path;
+
+	if (ssid != fsec->sid) {
+		rc = avc_has_perm(ssid, fsec->sid,
+				SECCLASS_FD,
+				FD__USE,
+				&ad);
+		if (rc)
+			goto out;
+	}
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass,
+			requested, driver, xperm, &ad);
+out:
+	return rc;
+}
+
+static int selinux_file_ioctl(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	const struct cred *cred = current_cred();
+	int error = 0;
+
+	switch (cmd) {
+	case FIONREAD:
+	/* fall through */
+	case FIBMAP:
+	/* fall through */
+	case FIGETBSZ:
+	/* fall through */
+	case FS_IOC_GETFLAGS:
+	/* fall through */
+	case FS_IOC_GETVERSION:
+		error = file_has_perm(cred, file, FILE__GETATTR);
+		break;
+
+	case FS_IOC_SETFLAGS:
+	/* fall through */
+	case FS_IOC_SETVERSION:
+		error = file_has_perm(cred, file, FILE__SETATTR);
+		break;
+
+	/* sys_ioctl() checks */
+	case FIONBIO:
+	/* fall through */
+	case FIOASYNC:
+		error = file_has_perm(cred, file, 0);
+		break;
+
+	case KDSKBENT:
+	case KDSKBSENT:
+		error = cred_has_capability(cred, CAP_SYS_TTY_CONFIG,
+					    SECURITY_CAP_AUDIT);
+		break;
+
+	/* default case assumes that the command will go
+	 * to the file's ioctl() function.
+	 */
+	default:
+		error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd);
+	}
+	return error;
+}
+
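+/*
+ * Nonzero when the architecture does not map data pages executable by
+ * default; initialized during SELinux setup (outside this hunk).  The
+ * EXECMEM/EXECSTACK/EXECHEAP/EXECMOD checks below only apply in that
+ * case.
+ */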
+static int default_noexec;
+
+static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
+{
+	const struct cred *cred = current_cred();
+	int rc = 0;
+
+	if (default_noexec &&
+	    (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
+				   (!shared && (prot & PROT_WRITE)))) {
+		/*
+		 * We are making executable an anonymous mapping or a
+		 * private file mapping that will also be writable.
+		 * This additionally requires the per-process EXECMEM
+		 * permission.
+		 */
+		rc = cred_has_perm(cred, cred, PROCESS__EXECMEM);
+		if (rc)
+			goto error;
+	}
+
+	if (file) {
+		/* read access is always possible with a mapping */
+		u32 av = FILE__READ;
+
+		/* write access only matters if the mapping is shared */
+		if (shared && (prot & PROT_WRITE))
+			av |= FILE__WRITE;
+
+		if (prot & PROT_EXEC)
+			av |= FILE__EXECUTE;
+
+		return file_has_perm(cred, file, av);
+	}
+
+error:
+	return rc;
+}
+
+static int selinux_mmap_addr(unsigned long addr)
+{
+	int rc = 0;
+
+	if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
+		u32 sid = current_sid();
+		rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT,
+				  MEMPROTECT__MMAP_ZERO, NULL);
+	}
+
+	return rc;
+}
+
+static int selinux_mmap_file(struct file *file, unsigned long reqprot,
+			     unsigned long prot, unsigned long flags)
+{
+	if (selinux_checkreqprot)
+		prot = reqprot;
+
+	return file_map_prot_check(file, prot,
+				   (flags & MAP_TYPE) == MAP_SHARED);
+}
+
+static int selinux_file_mprotect(struct vm_area_struct *vma,
+				 unsigned long reqprot,
+				 unsigned long prot)
+{
+	const struct cred *cred = current_cred();
+
+	if (selinux_checkreqprot)
+		prot = reqprot;
+
+	if (default_noexec &&
+	    (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
+		int rc = 0;
+		if (vma->vm_start >= vma->vm_mm->start_brk &&
+		    vma->vm_end <= vma->vm_mm->brk) {
+			rc = cred_has_perm(cred, cred, PROCESS__EXECHEAP);
+		} else if (!vma->vm_file &&
+			   vma->vm_start <= vma->vm_mm->start_stack &&
+			   vma->vm_end >= vma->vm_mm->start_stack) {
+			rc = current_has_perm(current, PROCESS__EXECSTACK);
+		} else if (vma->vm_file && vma->anon_vma) {
+			/*
+			 * We are making executable a file mapping that has
+			 * had some COW done. Since pages might have been
+			 * written, check ability to execute the possibly
+			 * modified content.  This typically should only
+			 * occur for text relocations.
+			 */
+			rc = file_has_perm(cred, vma->vm_file, FILE__EXECMOD);
+		}
+		if (rc)
+			return rc;
+	}
+
+	return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED);
+}
+
+static int selinux_file_lock(struct file *file, unsigned int cmd)
+{
+	const struct cred *cred = current_cred();
+
+	return file_has_perm(cred, file, FILE__LOCK);
+}
+
+static int selinux_file_fcntl(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	const struct cred *cred = current_cred();
+	int err = 0;
+
+	switch (cmd) {
+	case F_SETFL:
+		if ((file->f_flags & O_APPEND) && !(arg & O_APPEND)) {
+			err = file_has_perm(cred, file, FILE__WRITE);
+			break;
+		}
+		/* fall through */
+	case F_SETOWN:
+	case F_SETSIG:
+	case F_GETFL:
+	case F_GETOWN:
+	case F_GETSIG:
+	case F_GETOWNER_UIDS:
+		/* Just check FD__USE permission */
+		err = file_has_perm(cred, file, 0);
+		break;
+	case F_GETLK:
+	case F_SETLK:
+	case F_SETLKW:
+	case F_OFD_GETLK:
+	case F_OFD_SETLK:
+	case F_OFD_SETLKW:
+#if BITS_PER_LONG == 32
+	case F_GETLK64:
+	case F_SETLK64:
+	case F_SETLKW64:
+#endif
+		err = file_has_perm(cred, file, FILE__LOCK);
+		break;
+	}
+
+	return err;
+}
+
+static void selinux_file_set_fowner(struct file *file)
+{
+	struct file_security_struct *fsec;
+
+	fsec = file->f_security;
+	fsec->fown_sid = current_sid();
+}
+
+static int selinux_file_send_sigiotask(struct task_struct *tsk,
+				       struct fown_struct *fown, int signum)
+{
+	struct file *file;
+	u32 sid = task_sid(tsk);
+	u32 perm;
+	struct file_security_struct *fsec;
+
+	/* struct fown_struct is never outside the context of a struct file */
+	file = container_of(fown, struct file, f_owner);
+
+	fsec = file->f_security;
+
+	if (!signum)
+		perm = signal_to_av(SIGIO); /* as per send_sigio_to_task */
+	else
+		perm = signal_to_av(signum);
+
+	return avc_has_perm(fsec->fown_sid, sid,
+			    SECCLASS_PROCESS, perm, NULL);
+}
+
+static int selinux_file_receive(struct file *file)
+{
+	const struct cred *cred = current_cred();
+
+	return file_has_perm(cred, file, file_to_av(file));
+}
+
+static int selinux_file_open(struct file *file, const struct cred *cred)
+{
+	struct file_security_struct *fsec;
+	struct inode_security_struct *isec;
+
+	fsec = file->f_security;
+	isec = file_inode(file)->i_security;
+	/*
+	 * Save inode label and policy sequence number
+	 * at open-time so that selinux_file_permission
+	 * can determine whether revalidation is necessary.
+	 * Task label is already saved in the file security
+	 * struct as its SID.
+	 */
+	fsec->isid = isec->sid;
+	fsec->pseqno = avc_policy_seqno();
+	/*
+	 * Since the inode label or policy seqno may have changed
+	 * between the selinux_inode_permission check and the saving
+	 * of state above, recheck that access is still permitted.
+	 * Otherwise, access might never be revalidated against the
+	 * new inode label or new policy.
+	 * This check is not redundant - do not remove.
+	 */
+	return file_path_has_perm(cred, file, open_file_to_av(file));
+}
+
+/* task security operations */
+
+static int selinux_task_create(unsigned long clone_flags)
+{
+	return current_has_perm(current, PROCESS__FORK);
+}
+
+/*
+ * allocate the SELinux part of blank credentials
+ */
+static int selinux_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+	struct task_security_struct *tsec;
+
+	tsec = kzalloc(sizeof(struct task_security_struct), gfp);
+	if (!tsec)
+		return -ENOMEM;
+
+	cred->security = tsec;
+	return 0;
+}
+
+/*
+ * detach and free the LSM part of a set of credentials
+ */
+static void selinux_cred_free(struct cred *cred)
+{
+	struct task_security_struct *tsec = cred->security;
+
+	/*
+	 * cred->security == NULL if security_cred_alloc_blank() or
+	 * security_prepare_creds() returned an error.
+	 */
+	BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
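+	/* poison the pointer so a use-after-free of the blob faults early */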
+	cred->security = (void *) 0x7UL;
+	kfree(tsec);
+}
+
+/*
+ * prepare a new set of credentials for modification
+ */
+static int selinux_cred_prepare(struct cred *new, const struct cred *old,
+				gfp_t gfp)
+{
+	const struct task_security_struct *old_tsec;
+	struct task_security_struct *tsec;
+
+	old_tsec = old->security;
+
+	tsec = kmemdup(old_tsec, sizeof(struct task_security_struct), gfp);
+	if (!tsec)
+		return -ENOMEM;
+
+	new->security = tsec;
+	return 0;
+}
+
+/*
+ * transfer the SELinux data to a blank set of creds
+ */
+static void selinux_cred_transfer(struct cred *new, const struct cred *old)
+{
+	const struct task_security_struct *old_tsec = old->security;
+	struct task_security_struct *tsec = new->security;
+
+	*tsec = *old_tsec;
+}
+
+/*
+ * set the security data for a kernel service
+ * - all the creation contexts are set to unlabelled
+ */
+static int selinux_kernel_act_as(struct cred *new, u32 secid)
+{
+	struct task_security_struct *tsec = new->security;
+	u32 sid = current_sid();
+	int ret;
+
+	ret = avc_has_perm(sid, secid,
+			   SECCLASS_KERNEL_SERVICE,
+			   KERNEL_SERVICE__USE_AS_OVERRIDE,
+			   NULL);
+	if (ret == 0) {
+		tsec->sid = secid;
+		tsec->create_sid = 0;
+		tsec->keycreate_sid = 0;
+		tsec->sockcreate_sid = 0;
+	}
+	return ret;
+}
+
+/*
+ * set the file creation context in a security record to the same as the
+ * objective context of the specified inode
+ */
+static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
+{
+	struct inode_security_struct *isec = inode->i_security;
+	struct task_security_struct *tsec = new->security;
+	u32 sid = current_sid();
+	int ret;
+
+	ret = avc_has_perm(sid, isec->sid,
+			   SECCLASS_KERNEL_SERVICE,
+			   KERNEL_SERVICE__CREATE_FILES_AS,
+			   NULL);
+
+	if (ret == 0)
+		tsec->create_sid = isec->sid;
+	return ret;
+}
+
+static int selinux_kernel_module_request(char *kmod_name)
+{
+	u32 sid;
+	struct common_audit_data ad;
+
+	sid = task_sid(current);
+
+	ad.type = LSM_AUDIT_DATA_KMOD;
+	ad.u.kmod_name = kmod_name;
+
+	return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM,
+			    SYSTEM__MODULE_REQUEST, &ad);
+}
+
+static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
+{
+	return current_has_perm(p, PROCESS__SETPGID);
+}
+
+static int selinux_task_getpgid(struct task_struct *p)
+{
+	return current_has_perm(p, PROCESS__GETPGID);
+}
+
+static int selinux_task_getsid(struct task_struct *p)
+{
+	return current_has_perm(p, PROCESS__GETSESSION);
+}
+
+static void selinux_task_getsecid(struct task_struct *p, u32 *secid)
+{
+	*secid = task_sid(p);
+}
+
+static int selinux_task_setnice(struct task_struct *p, int nice)
+{
+	return current_has_perm(p, PROCESS__SETSCHED);
+}
+
+static int selinux_task_setioprio(struct task_struct *p, int ioprio)
+{
+	return current_has_perm(p, PROCESS__SETSCHED);
+}
+
+static int selinux_task_getioprio(struct task_struct *p)
+{
+	return current_has_perm(p, PROCESS__GETSCHED);
+}
+
+static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
+		struct rlimit *new_rlim)
+{
+	struct rlimit *old_rlim = p->signal->rlim + resource;
+
+	/* Control the ability to change the hard limit (whether
+	   lowering or raising it), so that the hard limit can
+	   later be used as a safe reset point for the soft limit
+	   upon context transitions.  See selinux_bprm_committing_creds. */
+	if (old_rlim->rlim_max != new_rlim->rlim_max)
+		return current_has_perm(p, PROCESS__SETRLIMIT);
+
+	return 0;
+}
+
+static int selinux_task_setscheduler(struct task_struct *p)
+{
+	return current_has_perm(p, PROCESS__SETSCHED);
+}
+
+static int selinux_task_getscheduler(struct task_struct *p)
+{
+	return current_has_perm(p, PROCESS__GETSCHED);
+}
+
+static int selinux_task_movememory(struct task_struct *p)
+{
+	return current_has_perm(p, PROCESS__SETSCHED);
+}
+
+static int selinux_task_kill(struct task_struct *p, struct siginfo *info,
+				int sig, u32 secid)
+{
+	u32 perm;
+	int rc;
+
+	if (!sig)
+		perm = PROCESS__SIGNULL; /* null signal; existence test */
+	else
+		perm = signal_to_av(sig);
+	if (secid)
+		rc = avc_has_perm(secid, task_sid(p),
+				  SECCLASS_PROCESS, perm, NULL);
+	else
+		rc = current_has_perm(p, perm);
+	return rc;
+}
+
+static int selinux_task_wait(struct task_struct *p)
+{
+	return task_has_perm(p, current, PROCESS__SIGCHLD);
+}
+
+static void selinux_task_to_inode(struct task_struct *p,
+				  struct inode *inode)
+{
+	struct inode_security_struct *isec = inode->i_security;
+	u32 sid = task_sid(p);
+
+	isec->sid = sid;
+	isec->initialized = 1;
+}
+
+/* Returns error only if unable to parse addresses */
+static int selinux_parse_skb_ipv4(struct sk_buff *skb,
+			struct common_audit_data *ad, u8 *proto)
+{
+	int offset, ihlen, ret = -EINVAL;
+	struct iphdr _iph, *ih;
+
+	offset = skb_network_offset(skb);
+	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
+	if (ih == NULL)
+		goto out;
+
+	ihlen = ih->ihl * 4;
+	if (ihlen < sizeof(_iph))
+		goto out;
+
+	ad->u.net->v4info.saddr = ih->saddr;
+	ad->u.net->v4info.daddr = ih->daddr;
+	ret = 0;
+
+	if (proto)
+		*proto = ih->protocol;
+
+	switch (ih->protocol) {
+	case IPPROTO_TCP: {
+		struct tcphdr _tcph, *th;
+
+		if (ntohs(ih->frag_off) & IP_OFFSET)
+			break;
+
+		offset += ihlen;
+		th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+		if (th == NULL)
+			break;
+
+		ad->u.net->sport = th->source;
+		ad->u.net->dport = th->dest;
+		break;
+	}
+
+	case IPPROTO_UDP: {
+		struct udphdr _udph, *uh;
+
+		if (ntohs(ih->frag_off) & IP_OFFSET)
+			break;
+
+		offset += ihlen;
+		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+		if (uh == NULL)
+			break;
+
+		ad->u.net->sport = uh->source;
+		ad->u.net->dport = uh->dest;
+		break;
+	}
+
+	case IPPROTO_DCCP: {
+		struct dccp_hdr _dccph, *dh;
+
+		if (ntohs(ih->frag_off) & IP_OFFSET)
+			break;
+
+		offset += ihlen;
+		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+		if (dh == NULL)
+			break;
+
+		ad->u.net->sport = dh->dccph_sport;
+		ad->u.net->dport = dh->dccph_dport;
+		break;
+	}
+
+	default:
+		break;
+	}
+out:
+	return ret;
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+/* Returns error only if unable to parse addresses */
+static int selinux_parse_skb_ipv6(struct sk_buff *skb,
+			struct common_audit_data *ad, u8 *proto)
+{
+	u8 nexthdr;
+	int ret = -EINVAL, offset;
+	struct ipv6hdr _ipv6h, *ip6;
+	__be16 frag_off;
+
+	offset = skb_network_offset(skb);
+	ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+	if (ip6 == NULL)
+		goto out;
+
+	ad->u.net->v6info.saddr = ip6->saddr;
+	ad->u.net->v6info.daddr = ip6->daddr;
+	ret = 0;
+
+	nexthdr = ip6->nexthdr;
+	offset += sizeof(_ipv6h);
+	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+	if (offset < 0)
+		goto out;
+
+	if (proto)
+		*proto = nexthdr;
+
+	switch (nexthdr) {
+	case IPPROTO_TCP: {
+		struct tcphdr _tcph, *th;
+
+		th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+		if (th == NULL)
+			break;
+
+		ad->u.net->sport = th->source;
+		ad->u.net->dport = th->dest;
+		break;
+	}
+
+	case IPPROTO_UDP: {
+		struct udphdr _udph, *uh;
+
+		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+		if (uh == NULL)
+			break;
+
+		ad->u.net->sport = uh->source;
+		ad->u.net->dport = uh->dest;
+		break;
+	}
+
+	case IPPROTO_DCCP: {
+		struct dccp_hdr _dccph, *dh;
+
+		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+		if (dh == NULL)
+			break;
+
+		ad->u.net->sport = dh->dccph_sport;
+		ad->u.net->dport = dh->dccph_dport;
+		break;
+	}
+
+	/* includes fragments */
+	default:
+		break;
+	}
+out:
+	return ret;
+}
+
+#endif /* IPV6 */
+
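+/*
+ * Fill in the address/port fields of the audit data from the packet
+ * headers and, if @_addrp is non-NULL, return a pointer to the source
+ * (@src non-zero) or destination address for the caller's node checks.
+ */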
+static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
+			     char **_addrp, int src, u8 *proto)
+{
+	char *addrp;
+	int ret;
+
+	switch (ad->u.net->family) {
+	case PF_INET:
+		ret = selinux_parse_skb_ipv4(skb, ad, proto);
+		if (ret)
+			goto parse_error;
+		addrp = (char *)(src ? &ad->u.net->v4info.saddr :
+				       &ad->u.net->v4info.daddr);
+		goto okay;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case PF_INET6:
+		ret = selinux_parse_skb_ipv6(skb, ad, proto);
+		if (ret)
+			goto parse_error;
+		addrp = (char *)(src ? &ad->u.net->v6info.saddr :
+				       &ad->u.net->v6info.daddr);
+		goto okay;
+#endif	/* IPV6 */
+	default:
+		addrp = NULL;
+		goto okay;
+	}
+
+parse_error:
+	printk(KERN_WARNING
+	       "SELinux: failure in selinux_parse_skb(),"
+	       " unable to parse packet\n");
+	return ret;
+
+okay:
+	if (_addrp)
+		*_addrp = addrp;
+	return 0;
+}
+
+/**
+ * selinux_skb_peerlbl_sid - Determine the peer label of a packet
+ * @skb: the packet
+ * @family: protocol family
+ * @sid: the packet's peer label SID
+ *
+ * Description:
+ * Check the various different forms of network peer labeling and determine
+ * the peer label/SID for the packet; most of the magic actually occurs in
+ * the security server function security_net_peersid_cmp().  The function
+ * returns zero if the value in @sid is valid (although it may be SECSID_NULL)
+ * or -EACCES if @sid is invalid due to inconsistencies with the different
+ * peer labels.
+ *
+ */
+static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
+{
+	int err;
+	u32 xfrm_sid;
+	u32 nlbl_sid;
+	u32 nlbl_type;
+
+	err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
+	if (unlikely(err))
+		return -EACCES;
+	err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+	if (unlikely(err))
+		return -EACCES;
+
+	err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
+	if (unlikely(err)) {
+		printk(KERN_WARNING
+		       "SELinux: failure in selinux_skb_peerlbl_sid(),"
+		       " unable to determine packet's peer label\n");
+		return -EACCES;
+	}
+
+	return 0;
+}
+
+/**
+ * selinux_conn_sid - Determine the child socket label for a connection
+ * @sk_sid: the parent socket's SID
+ * @skb_sid: the packet's SID
+ * @conn_sid: the resulting connection SID
+ *
+ * If @skb_sid is valid then the user:role:type information from @sk_sid is
+ * combined with the MLS information from @skb_sid in order to create
+ * @conn_sid.  If @skb_sid is not valid then @conn_sid is simply a copy
+ * of @sk_sid.  Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
+{
+	int err = 0;
+
+	if (skb_sid != SECSID_NULL)
+		err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
+	else
+		*conn_sid = sk_sid;
+
+	return err;
+}
+
+/* socket security operations */
+
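+/*
+ * Compute the SID for a new socket: use the task's explicit sockcreate
+ * SID when one has been set, otherwise apply the type transition from
+ * the task SID for the given socket class.
+ */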
+static int socket_sockcreate_sid(const struct task_security_struct *tsec,
+				 u16 secclass, u32 *socksid)
+{
+	if (tsec->sockcreate_sid > SECSID_NULL) {
+		*socksid = tsec->sockcreate_sid;
+		return 0;
+	}
+
+	return security_transition_sid(tsec->sid, tsec->sid, secclass, NULL,
+				       socksid);
+}
+
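+/*
+ * Common permission check for the socket hooks; sockets labeled as
+ * kernel sockets (SECINITSID_KERNEL) bypass the check entirely.
+ */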
+static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms)
+{
+	struct sk_security_struct *sksec = sk->sk_security;
+	struct common_audit_data ad;
+	struct lsm_network_audit net = {0,};
+	u32 tsid = task_sid(task);
+
+	if (sksec->sid == SECINITSID_KERNEL)
+		return 0;
+
+	ad.type = LSM_AUDIT_DATA_NET;
+	ad.u.net = &net;
+	ad.u.net->sk = sk;
+
+	return avc_has_perm(tsid, sksec->sid, sksec->sclass, perms, &ad);
+}
+
+static int selinux_socket_create(int family, int type,
+				 int protocol, int kern)
+{
+	const struct task_security_struct *tsec = current_security();
+	u32 newsid;
+	u16 secclass;
+	int rc;
+
+	if (kern)
+		return 0;
+
+	secclass = socket_type_to_security_class(family, type, protocol);
+	rc = socket_sockcreate_sid(tsec, secclass, &newsid);
+	if (rc)
+		return rc;
+
+	return avc_has_perm(tsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
+}
+
+static int selinux_socket_post_create(struct socket *sock, int family,
+				      int type, int protocol, int kern)
+{
+	const struct task_security_struct *tsec = current_security();
+	struct inode_security_struct *isec = SOCK_INODE(sock)->i_security;
+	struct sk_security_struct *sksec;
+	int err = 0;
+
+	isec->sclass = socket_type_to_security_class(family, type, protocol);
+
+	if (kern)
+		isec->sid = SECINITSID_KERNEL;
+	else {
+		err = socket_sockcreate_sid(tsec, isec->sclass, &(isec->sid));
+		if (err)
+			return err;
+	}
+
+	isec->initialized = 1;
+
+	if (sock->sk) {
+		sksec = sock->sk->sk_security;
+		sksec->sid = isec->sid;
+		sksec->sclass = isec->sclass;
+		err = selinux_netlbl_socket_post_create(sock->sk, family);
+	}
+
+	return err;
+}
+
+/* If the requested port lies outside the range used for automatic
+   binding, perform a name_bind permission check between the socket
+   and the port number. */
+
+static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
+{
+	struct sock *sk = sock->sk;
+	u16 family;
+	int err;
+
+	err = sock_has_perm(current, sk, SOCKET__BIND);
+	if (err)
+		goto out;
+
+	/*
+	 * If PF_INET or PF_INET6, check name_bind permission for the port.
+	 * Multiple address binding for SCTP is not supported yet: we just
+	 * check the first address now.
+	 */
+	family = sk->sk_family;
+	if (family == PF_INET || family == PF_INET6) {
+		char *addrp;
+		struct sk_security_struct *sksec = sk->sk_security;
+		struct common_audit_data ad;
+		struct lsm_network_audit net = {0,};
+		struct sockaddr_in *addr4 = NULL;
+		struct sockaddr_in6 *addr6 = NULL;
+		unsigned short snum;
+		u32 sid, node_perm;
+
+		if (family == PF_INET) {
+			addr4 = (struct sockaddr_in *)address;
+			snum = ntohs(addr4->sin_port);
+			addrp = (char *)&addr4->sin_addr.s_addr;
+		} else {
+			addr6 = (struct sockaddr_in6 *)address;
+			snum = ntohs(addr6->sin6_port);
+			addrp = (char *)&addr6->sin6_addr.s6_addr;
+		}
+
+		if (snum) {
+			int low, high;
+
+			inet_get_local_port_range(sock_net(sk), &low, &high);
+
+			if (snum < max(PROT_SOCK, low) || snum > high) {
+				err = sel_netport_sid(sk->sk_protocol,
+						      snum, &sid);
+				if (err)
+					goto out;
+				ad.type = LSM_AUDIT_DATA_NET;
+				ad.u.net = &net;
+				ad.u.net->sport = htons(snum);
+				ad.u.net->family = family;
+				err = avc_has_perm(sksec->sid, sid,
+						   sksec->sclass,
+						   SOCKET__NAME_BIND, &ad);
+				if (err)
+					goto out;
+			}
+		}
+
+		switch (sksec->sclass) {
+		case SECCLASS_TCP_SOCKET:
+			node_perm = TCP_SOCKET__NODE_BIND;
+			break;
+
+		case SECCLASS_UDP_SOCKET:
+			node_perm = UDP_SOCKET__NODE_BIND;
+			break;
+
+		case SECCLASS_DCCP_SOCKET:
+			node_perm = DCCP_SOCKET__NODE_BIND;
+			break;
+
+		default:
+			node_perm = RAWIP_SOCKET__NODE_BIND;
+			break;
+		}
+
+		err = sel_netnode_sid(addrp, family, &sid);
+		if (err)
+			goto out;
+
+		ad.type = LSM_AUDIT_DATA_NET;
+		ad.u.net = &net;
+		ad.u.net->sport = htons(snum);
+		ad.u.net->family = family;
+
+		if (family == PF_INET)
+			ad.u.net->v4info.saddr = addr4->sin_addr.s_addr;
+		else
+			ad.u.net->v6info.saddr = addr6->sin6_addr;
+
+		err = avc_has_perm(sksec->sid, sid,
+				   sksec->sclass, node_perm, &ad);
+		if (err)
+			goto out;
+	}
+out:
+	return err;
+}
+
+static int selinux_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
+{
+	struct sock *sk = sock->sk;
+	struct sk_security_struct *sksec = sk->sk_security;
+	int err;
+
+	err = sock_has_perm(current, sk, SOCKET__CONNECT);
+	if (err)
+		return err;
+
+	/*
+	 * If a TCP or DCCP socket, check name_connect permission for the port.
+	 */
+	if (sksec->sclass == SECCLASS_TCP_SOCKET ||
+	    sksec->sclass == SECCLASS_DCCP_SOCKET) {
+		struct common_audit_data ad;
+		struct lsm_network_audit net = {0,};
+		struct sockaddr_in *addr4 = NULL;
+		struct sockaddr_in6 *addr6 = NULL;
+		unsigned short snum;
+		u32 sid, perm;
+
+		if (sk->sk_family == PF_INET) {
+			addr4 = (struct sockaddr_in *)address;
+			if (addrlen < sizeof(struct sockaddr_in))
+				return -EINVAL;
+			snum = ntohs(addr4->sin_port);
+		} else {
+			addr6 = (struct sockaddr_in6 *)address;
+			if (addrlen < SIN6_LEN_RFC2133)
+				return -EINVAL;
+			snum = ntohs(addr6->sin6_port);
+		}
+
+		err = sel_netport_sid(sk->sk_protocol, snum, &sid);
+		if (err)
+			goto out;
+
+		perm = (sksec->sclass == SECCLASS_TCP_SOCKET) ?
+		       TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
+
+		ad.type = LSM_AUDIT_DATA_NET;
+		ad.u.net = &net;
+		ad.u.net->dport = htons(snum);
+		ad.u.net->family = sk->sk_family;
+		err = avc_has_perm(sksec->sid, sid, sksec->sclass, perm, &ad);
+		if (err)
+			goto out;
+	}
+
+	err = selinux_netlbl_socket_connect(sk, address);
+
+out:
+	return err;
+}
+
+static int selinux_socket_listen(struct socket *sock, int backlog)
+{
+	return sock_has_perm(current, sock->sk, SOCKET__LISTEN);
+}
+
+static int selinux_socket_accept(struct socket *sock, struct socket *newsock)
+{
+	int err;
+	struct inode_security_struct *isec;
+	struct inode_security_struct *newisec;
+
+	err = sock_has_perm(current, sock->sk, SOCKET__ACCEPT);
+	if (err)
+		return err;
+
+	newisec = SOCK_INODE(newsock)->i_security;
+
+	isec = SOCK_INODE(sock)->i_security;
+	newisec->sclass = isec->sclass;
+	newisec->sid = isec->sid;
+	newisec->initialized = 1;
+
+	return 0;
+}
+
+static int selinux_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+				  int size)
+{
+	return sock_has_perm(current, sock->sk, SOCKET__WRITE);
+}
+
+static int selinux_socket_recvmsg(struct socket *sock, struct msghdr *msg,
+				  int size, int flags)
+{
+	return sock_has_perm(current, sock->sk, SOCKET__READ);
+}
+
+static int selinux_socket_getsockname(struct socket *sock)
+{
+	return sock_has_perm(current, sock->sk, SOCKET__GETATTR);
+}
+
+static int selinux_socket_getpeername(struct socket *sock)
+{
+	return sock_has_perm(current, sock->sk, SOCKET__GETATTR);
+}
+
+static int selinux_socket_setsockopt(struct socket *sock, int level, int optname)
+{
+	int err;
+
+	err = sock_has_perm(current, sock->sk, SOCKET__SETOPT);
+	if (err)
+		return err;
+
+	return selinux_netlbl_socket_setsockopt(sock, level, optname);
+}
+
+static int selinux_socket_getsockopt(struct socket *sock, int level,
+				     int optname)
+{
+	return sock_has_perm(current, sock->sk, SOCKET__GETOPT);
+}
+
+static int selinux_socket_shutdown(struct socket *sock, int how)
+{
+	return sock_has_perm(current, sock->sk, SOCKET__SHUTDOWN);
+}
+
+static int selinux_socket_unix_stream_connect(struct sock *sock,
+					      struct sock *other,
+					      struct sock *newsk)
+{
+	struct sk_security_struct *sksec_sock = sock->sk_security;
+	struct sk_security_struct *sksec_other = other->sk_security;
+	struct sk_security_struct *sksec_new = newsk->sk_security;
+	struct common_audit_data ad;
+	struct lsm_network_audit net = {0,};
+	int err;
+
+	ad.type = LSM_AUDIT_DATA_NET;
+	ad.u.net = &net;
+	ad.u.net->sk = other;
+
+	err = avc_has_perm(sksec_sock->sid, sksec_other->sid,
+			   sksec_other->sclass,
+			   UNIX_STREAM_SOCKET__CONNECTTO, &ad);
+	if (err)
+		return err;
+
+	/* server child socket */
+	sksec_new->peer_sid = sksec_sock->sid;
+	err = security_sid_mls_copy(sksec_other->sid, sksec_sock->sid,
+				    &sksec_new->sid);
+	if (err)
+		return err;
+
+	/* connecting socket */
+	sksec_sock->peer_sid = sksec_new->sid;
+
+	return 0;
+}
+
+static int selinux_socket_unix_may_send(struct socket *sock,
+					struct socket *other)
+{
+	struct sk_security_struct *ssec = sock->sk->sk_security;
+	struct sk_security_struct *osec = other->sk->sk_security;
+	struct common_audit_data ad;
+	struct lsm_network_audit net = {0,};
+
+	ad.type = LSM_AUDIT_DATA_NET;
+	ad.u.net = &net;
+	ad.u.net->sk = other->sk;
+
+	return avc_has_perm(ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
+			    &ad);
+}
+
+static int selinux_inet_sys_rcv_skb(struct net *ns, int ifindex,
+				    char *addrp, u16 family, u32 peer_sid,
+				    struct common_audit_data *ad)
+{
+	int err;
+	u32 if_sid;
+	u32 node_sid;
+
+	err = sel_netif_sid(ns, ifindex, &if_sid);
+	if (err)
+		return err;
+	err = avc_has_perm(peer_sid, if_sid,
+			   SECCLASS_NETIF, NETIF__INGRESS, ad);
+	if (err)
+		return err;
+
+	err = sel_netnode_sid(addrp, family, &node_sid);
+	if (err)
+		return err;
+	return avc_has_perm(peer_sid, node_sid,
+			    SECCLASS_NODE, NODE__RECVFROM, ad);
+}
+
+static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
+				       u16 family)
+{
+	int err = 0;
+	struct sk_security_struct *sksec = sk->sk_security;
+	u32 sk_sid = sksec->sid;
+	struct common_audit_data ad;
+	struct lsm_network_audit net = {0,};
+	char *addrp;
+
+	ad.type = LSM_AUDIT_DATA_NET;
+	ad.u.net = &net;
+	ad.u.net->netif = skb->skb_iif;
+	ad.u.net->family = family;
+	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
+	if (err)
+		return err;
+
+	if (selinux_secmark_enabled()) {
+		err = avc_has_perm(sk_sid, skb->secmark, SECCLASS_PACKET,
+				   PACKET__RECV, &ad);
+		if (err)
+			return err;
+	}
+
+	err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad);
+	if (err)
+		return err;
+	err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
+
+	return err;
+}
+
+static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int err;
+	struct sk_security_struct *sksec = sk->sk_security;
+	u16 family = sk->sk_family;
+	u32 sk_sid = sksec->sid;
+	struct common_audit_data ad;
+	struct lsm_network_audit net = {0,};
+	char *addrp;
+	u8 secmark_active;
+	u8 peerlbl_active;
+
+	if (family != PF_INET && family != PF_INET6)
+		return 0;
+
+	/* Handle mapped IPv4 packets arriving via IPv6 sockets */
+	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+		family = PF_INET;
+
+	/* If any sort of compatibility mode is enabled then hand off processing
+	 * to the selinux_sock_rcv_skb_compat() function to deal with the
+	 * special handling.  We do this in an attempt to keep this function
+	 * as fast and as clean as possible. */
+	if (!selinux_policycap_netpeer)
+		return selinux_sock_rcv_skb_compat(sk, skb, family);
+
+	secmark_active = selinux_secmark_enabled();
+	peerlbl_active = selinux_peerlbl_enabled();
+	if (!secmark_active && !peerlbl_active)
+		return 0;
+
+	ad.type = LSM_AUDIT_DATA_NET;
+	ad.u.net = &net;
+	ad.u.net->netif = skb->skb_iif;
+	ad.u.net->family = family;
+	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
+	if (err)
+		return err;
+
+	if (peerlbl_active) {
+		u32 peer_sid;
+
+		err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
+		if (err)
+			return err;
+		err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif,
+					       addrp, family, peer_sid, &ad);
+		if (err) {
+			selinux_netlbl_err(skb, err, 0);
+			return err;
+		}
+		err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
+				   PEER__RECV, &ad);
+		if (err) {
+			selinux_netlbl_err(skb, err, 0);
+			return err;
+		}
+	}
+
+	if (secmark_active) {
+		err = avc_has_perm(sk_sid, skb->secmark, SECCLASS_PACKET,
+				   PACKET__RECV, &ad);
+		if (err)
+			return err;
+	}
+
+	return err;
+}
+
+static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *optval,
+					    int __user *optlen, unsigned len)
+{
+	int err = 0;
+	char *scontext;
+	u32 scontext_len;
+	struct sk_security_struct *sksec = sock->sk->sk_security;
+	u32 peer_sid = SECSID_NULL;
+
+	if (sksec->sclass == SECCLASS_UNIX_STREAM_SOCKET ||
+	    sksec->sclass == SECCLASS_TCP_SOCKET)
+		peer_sid = sksec->peer_sid;
+	if (peer_sid == SECSID_NULL)
+		return -ENOPROTOOPT;
+
+	err = security_sid_to_context(peer_sid, &scontext, &scontext_len);
+	if (err)
+		return err;
+
+	if (scontext_len > len) {
+		err = -ERANGE;
+		goto out_len;
+	}
+
+	if (copy_to_user(optval, scontext, scontext_len))
+		err = -EFAULT;
+
+out_len:
+	if (put_user(scontext_len, optlen))
+		err = -EFAULT;
+	kfree(scontext);
+	return err;
+}
+
+static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
+{
+	u32 peer_secid = SECSID_NULL;
+	u16 family;
+
+	if (skb && skb->protocol == htons(ETH_P_IP))
+		family = PF_INET;
+	else if (skb && skb->protocol == htons(ETH_P_IPV6))
+		family = PF_INET6;
+	else if (sock)
+		family = sock->sk->sk_family;
+	else
+		goto out;
+
+	if (sock && family == PF_UNIX)
+		selinux_inode_getsecid(SOCK_INODE(sock), &peer_secid);
+	else if (skb)
+		selinux_skb_peerlbl_sid(skb, family, &peer_secid);
+
+out:
+	*secid = peer_secid;
+	if (peer_secid == SECSID_NULL)
+		return -EINVAL;
+	return 0;
+}
+
+static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
+{
+	struct sk_security_struct *sksec;
+
+	sksec = kzalloc(sizeof(*sksec), priority);
+	if (!sksec)
+		return -ENOMEM;
+
+	sksec->peer_sid = SECINITSID_UNLABELED;
+	sksec->sid = SECINITSID_UNLABELED;
+	sksec->sclass = SECCLASS_SOCKET;
+	selinux_netlbl_sk_security_reset(sksec);
+	sk->sk_security = sksec;
+
+	return 0;
+}
+
+static void selinux_sk_free_security(struct sock *sk)
+{
+	struct sk_security_struct *sksec = sk->sk_security;
+
+	sk->sk_security = NULL;
+	selinux_netlbl_sk_security_free(sksec);
+	kfree(sksec);
+}
+
+static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk)
+{
+	struct sk_security_struct *sksec = sk->sk_security;
+	struct sk_security_struct *newsksec = newsk->sk_security;
+
+	newsksec->sid = sksec->sid;
+	newsksec->peer_sid = sksec->peer_sid;
+	newsksec->sclass = sksec->sclass;
+
+	selinux_netlbl_sk_security_reset(newsksec);
+}
+
+static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
+{
+	if (!sk)
+		*secid = SECINITSID_ANY_SOCKET;
+	else {
+		struct sk_security_struct *sksec = sk->sk_security;
+
+		*secid = sksec->sid;
+	}
+}
+
+static void selinux_sock_graft(struct sock *sk, struct socket *parent)
+{
+	struct inode_security_struct *isec = SOCK_INODE(parent)->i_security;
+	struct sk_security_struct *sksec = sk->sk_security;
+
+	if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6 ||
+	    sk->sk_family == PF_UNIX)
+		isec->sid = sksec->sid;
+	sksec->sclass = isec->sclass;
+}
+
+static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+				     struct request_sock *req)
+{
+	struct sk_security_struct *sksec = sk->sk_security;
+	int err;
+	u16 family = req->rsk_ops->family;
+	u32 connsid;
+	u32 peersid;
+
+	err = selinux_skb_peerlbl_sid(skb, family, &peersid);
+	if (err)
+		return err;
+	err = selinux_conn_sid(sksec->sid, peersid, &connsid);
+	if (err)
+		return err;
+	req->secid = connsid;
+	req->peer_secid = peersid;
+
+	return selinux_netlbl_inet_conn_request(req, family);
+}
+
+static void selinux_inet_csk_clone(struct sock *newsk,
+				   const struct request_sock *req)
+{
+	struct sk_security_struct *newsksec = newsk->sk_security;
+
+	newsksec->sid = req->secid;
+	newsksec->peer_sid = req->peer_secid;
+	/* NOTE: Ideally, we should also get the isec->sid for the
+	   new socket in sync, but we don't have the isec available yet.
+	   So we will wait until sock_graft to do it, by which
+	   time it will have been created and available. */
+
+	/* We don't need to take any sort of lock here as we are the only
+	 * thread with access to newsksec */
+	selinux_netlbl_inet_csk_clone(newsk, req->rsk_ops->family);
+}
+
+static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
+{
+	u16 family = sk->sk_family;
+	struct sk_security_struct *sksec = sk->sk_security;
+
+	/* handle mapped IPv4 packets arriving via IPv6 sockets */
+	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+		family = PF_INET;
+
+	selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
+}
+
+static int selinux_secmark_relabel_packet(u32 sid)
+{
+	const struct task_security_struct *__tsec;
+	u32 tsid;
+
+	__tsec = current_security();
+	tsid = __tsec->sid;
+
+	return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL);
+}
+
+static void selinux_secmark_refcount_inc(void)
+{
+	atomic_inc(&selinux_secmark_refcount);
+}
+
+static void selinux_secmark_refcount_dec(void)
+{
+	atomic_dec(&selinux_secmark_refcount);
+}
+
+static void selinux_req_classify_flow(const struct request_sock *req,
+				      struct flowi *fl)
+{
+	fl->flowi_secid = req->secid;
+}
+
+static int selinux_tun_dev_alloc_security(void **security)
+{
+	struct tun_security_struct *tunsec;
+
+	tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL);
+	if (!tunsec)
+		return -ENOMEM;
+	tunsec->sid = current_sid();
+
+	*security = tunsec;
+	return 0;
+}
+
+static void selinux_tun_dev_free_security(void *security)
+{
+	kfree(security);
+}
+
+static int selinux_tun_dev_create(void)
+{
+	u32 sid = current_sid();
+
+	/* we aren't taking into account the "sockcreate" SID since the socket
+	 * that is being created here is not a socket in the traditional sense,
+	 * instead it is a private sock, accessible only to the kernel, and
+	 * representing a wide range of network traffic spanning multiple
+	 * connections unlike traditional sockets - check the TUN driver to
+	 * get a better understanding of why this socket is special */
+
+	return avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET, TUN_SOCKET__CREATE,
+			    NULL);
+}
+
+static int selinux_tun_dev_attach_queue(void *security)
+{
+	struct tun_security_struct *tunsec = security;
+
+	return avc_has_perm(current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
+			    TUN_SOCKET__ATTACH_QUEUE, NULL);
+}
+
+static int selinux_tun_dev_attach(struct sock *sk, void *security)
+{
+	struct tun_security_struct *tunsec = security;
+	struct sk_security_struct *sksec = sk->sk_security;
+
+	/* we don't currently perform any NetLabel based labeling here and it
+	 * isn't clear that we would want to do so anyway; while we could apply
+	 * labeling without the support of the TUN user the resulting labeled
+	 * traffic from the other end of the connection would almost certainly
+	 * cause confusion to the TUN user that had no idea network labeling
+	 * protocols were being used */
+
+	sksec->sid = tunsec->sid;
+	sksec->sclass = SECCLASS_TUN_SOCKET;
+
+	return 0;
+}
+
+static int selinux_tun_dev_open(void *security)
+{
+	struct tun_security_struct *tunsec = security;
+	u32 sid = current_sid();
+	int err;
+
+	err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET,
+			   TUN_SOCKET__RELABELFROM, NULL);
+	if (err)
+		return err;
+	err = avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET,
+			   TUN_SOCKET__RELABELTO, NULL);
+	if (err)
+		return err;
+	tunsec->sid = sid;
+
+	return 0;
+}
+
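+/*
+ * Map the netlink message type to the corresponding nlmsg_* socket
+ * permission and check it; unrecognized message types are logged and
+ * then permitted when not enforcing or when the loaded policy allows
+ * unknown permissions, while -ENOENT lookups are silently ignored.
+ */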
+static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
+{
+	int err = 0;
+	u32 perm;
+	struct nlmsghdr *nlh;
+	struct sk_security_struct *sksec = sk->sk_security;
+
+	if (skb->len < NLMSG_HDRLEN) {
+		err = -EINVAL;
+		goto out;
+	}
+	nlh = nlmsg_hdr(skb);
+
+	err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
+	if (err) {
+		if (err == -EINVAL) {
+			printk(KERN_WARNING
+			       "SELinux: unrecognized netlink message:"
+			       " protocol=%hu nlmsg_type=%hu sclass=%s\n",
+			       sk->sk_protocol, nlh->nlmsg_type,
+			       secclass_map[sksec->sclass - 1].name);
+			if (!selinux_enforcing || security_get_allow_unknown())
+				err = 0;
+		}
+
+		/* Ignore */
+		if (err == -ENOENT)
+			err = 0;
+		goto out;
+	}
+
+	err = sock_has_perm(current, sk, perm);
+out:
+	return err;
+}
+
+#ifdef CONFIG_NETFILTER
+
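+/*
+ * FORWARD hook helper: validate that a forwarded packet's peer label
+ * may ingress via the receiving interface and node, check the secmark
+ * forward permission, and apply NetLabel labeling before IPsec runs.
+ */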
+static unsigned int selinux_ip_forward(struct sk_buff *skb,
+				       const struct net_device *indev,
+				       u16 family)
+{
+	int err;
+	char *addrp;
+	u32 peer_sid;
+	struct common_audit_data ad;
+	struct lsm_network_audit net = {0,};
+	u8 secmark_active;
+	u8 netlbl_active;
+	u8 peerlbl_active;
+
+	if (!selinux_policycap_netpeer)
+		return NF_ACCEPT;
+
+	secmark_active = selinux_secmark_enabled();
+	netlbl_active = netlbl_enabled();
+	peerlbl_active = selinux_peerlbl_enabled();
+	if (!secmark_active && !peerlbl_active)
+		return NF_ACCEPT;
+
+	if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
+		return NF_DROP;
+
+	ad.type = LSM_AUDIT_DATA_NET;
+	ad.u.net = &net;
+	ad.u.net->netif = indev->ifindex;
+	ad.u.net->family = family;
+	if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
+		return NF_DROP;
+
+	if (peerlbl_active) {
+		err = selinux_inet_sys_rcv_skb(dev_net(indev), indev->ifindex,
+					       addrp, family, peer_sid, &ad);
+		if (err) {
+			selinux_netlbl_err(skb, err, 1);
+			return NF_DROP;
+		}
+	}
+
+	if (secmark_active)
+		if (avc_has_perm(peer_sid, skb->secmark,
+				 SECCLASS_PACKET, PACKET__FORWARD_IN, &ad))
+			return NF_DROP;
+
+	if (netlbl_active)
+		/* we do this in the FORWARD path and not the POST_ROUTING
+		 * path because we want to make sure we apply the necessary
+		 * labeling before IPsec is applied so we can leverage AH
+		 * protection */
+		if (selinux_netlbl_skbuff_setsid(skb, family, peer_sid) != 0)
+			return NF_DROP;
+
+	return NF_ACCEPT;
+}
+
+static unsigned int selinux_ipv4_forward(void *priv,
+					 struct sk_buff *skb,
+					 const struct nf_hook_state *state)
+{
+	return selinux_ip_forward(skb, state->in, PF_INET);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static unsigned int selinux_ipv6_forward(void *priv,
+					 struct sk_buff *skb,
+					 const struct nf_hook_state *state)
+{
+	return selinux_ip_forward(skb, state->in, PF_INET6);
+}
+#endif	/* IPV6 */
+
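+/* LOCAL_OUT hook helper: label locally generated traffic via NetLabel
+ * using the sending socket's SID, or SECINITSID_KERNEL if there is no
+ * associated socket. */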
+static unsigned int selinux_ip_output(struct sk_buff *skb,
+				      u16 family)
+{
+	struct sock *sk;
+	u32 sid;
+
+	if (!netlbl_enabled())
+		return NF_ACCEPT;
+
+	/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
+	 * because we want to make sure we apply the necessary labeling
+	 * before IPsec is applied so we can leverage AH protection */
+	sk = skb->sk;
+	if (sk) {
+		struct sk_security_struct *sksec;
+
+		if (sk_listener(sk))
+			/* if the socket is in the listening state then this
+			 * packet is a SYN-ACK packet which means it needs to
+			 * be labeled based on the connection/request_sock and
+			 * not the parent socket.  unfortunately, we can't
+			 * lookup the request_sock yet as it isn't queued on
+			 * the parent socket until after the SYN-ACK is sent.
+			 * the "solution" is to simply pass the packet as-is
+			 * as any IP option based labeling should be copied
+			 * from the initial connection request (in the IP
+			 * layer).  it is far from ideal, but until we get a
+			 * security label in the packet itself this is the
+			 * best we can do. */
+			return NF_ACCEPT;
+
+		/* standard practice, label using the parent socket */
+		sksec = sk->sk_security;
+		sid = sksec->sid;
+	} else
+		sid = SECINITSID_KERNEL;
+	if (selinux_netlbl_skbuff_setsid(skb, family, sid) != 0)
+		return NF_DROP;
+
+	return NF_ACCEPT;
+}
+
+static unsigned int selinux_ipv4_output(void *priv,
+					struct sk_buff *skb,
+					const struct nf_hook_state *state)
+{
+	return selinux_ip_output(skb, PF_INET);
+}
+
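+/* Legacy POST_ROUTING handling used when the netpeer policy capability
+ * is disabled: only the secmark send check and the final XFRM check are
+ * applied. */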
+static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
+						int ifindex,
+						u16 family)
+{
+	struct sock *sk = skb_to_full_sk(skb);
+	struct sk_security_struct *sksec;
+	struct common_audit_data ad;
+	struct lsm_network_audit net = {0,};
+	char *addrp;
+	u8 proto;
+
+	if (sk == NULL)
+		return NF_ACCEPT;
+	sksec = sk->sk_security;
+
+	ad.type = LSM_AUDIT_DATA_NET;
+	ad.u.net = &net;
+	ad.u.net->netif = ifindex;
+	ad.u.net->family = family;
+	if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
+		return NF_DROP;
+
+	if (selinux_secmark_enabled())
+		if (avc_has_perm(sksec->sid, skb->secmark,
+				 SECCLASS_PACKET, PACKET__SEND, &ad))
+			return NF_DROP_ERR(-ECONNREFUSED);
+
+	if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
+		return NF_DROP_ERR(-ECONNREFUSED);
+
+	return NF_ACCEPT;
+}
+
+static unsigned int selinux_ip_postroute(struct sk_buff *skb,
+					 const struct net_device *outdev,
+					 u16 family)
+{
+	u32 secmark_perm;
+	u32 peer_sid;
+	int ifindex = outdev->ifindex;
+	struct sock *sk;
+	struct common_audit_data ad;
+	struct lsm_network_audit net = {0,};
+	char *addrp;
+	u8 secmark_active;
+	u8 peerlbl_active;
+
+	/* If any sort of compatibility mode is enabled, hand off processing
+	 * to the selinux_ip_postroute_compat() function to deal with the
+	 * special handling.  We do this in an attempt to keep this function
+	 * as fast and as clean as possible. */
+	if (!selinux_policycap_netpeer)
+		return selinux_ip_postroute_compat(skb, ifindex, family);
+
+	secmark_active = selinux_secmark_enabled();
+	peerlbl_active = selinux_peerlbl_enabled();
+	if (!secmark_active && !peerlbl_active)
+		return NF_ACCEPT;
+
+	sk = skb_to_full_sk(skb);
+
+#ifdef CONFIG_XFRM
+	/* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
+	 * packet transformation so allow the packet to pass without any checks
+	 * since we'll have another chance to perform access control checks
+	 * when the packet is on its final way out.
+	 * NOTE: there appear to be some IPv6 multicast cases where skb->dst
+	 *       is NULL, in this case go ahead and apply access control.
+	 * NOTE: if this is a local socket (skb->sk != NULL) that is in the
+	 *       TCP listening state we cannot wait until the XFRM processing
+	 *       is done as we will miss out on the SA label if we do;
+	 *       unfortunately, this means more work, but it is only once per
+	 *       connection. */
+	if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
+	    !(sk && sk_listener(sk)))
+		return NF_ACCEPT;
+#endif
+
+	if (sk == NULL) {
+		/* Without an associated socket the packet is either coming
+		 * from the kernel or it is being forwarded; check the packet
+		 * to determine which, and if the packet is being forwarded,
+		 * query the packet directly to determine the security label. */
+		if (skb->skb_iif) {
+			secmark_perm = PACKET__FORWARD_OUT;
+			if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
+				return NF_DROP;
+		} else {
+			secmark_perm = PACKET__SEND;
+			peer_sid = SECINITSID_KERNEL;
+		}
+	} else if (sk_listener(sk)) {
+		/* Locally generated packet but the associated socket is in the
+		 * listening state which means this is a SYN-ACK packet.  In
+		 * this particular case the correct security label is assigned
+		 * to the connection/request_sock but unfortunately we can't
+		 * query the request_sock as it isn't queued on the parent
+		 * socket until after the SYN-ACK packet is sent; the only
+		 * viable choice is to regenerate the label like we do in
+		 * selinux_inet_conn_request().  See also selinux_ip_output()
+		 * for similar problems. */
+		u32 skb_sid;
+		struct sk_security_struct *sksec;
+
+		sksec = sk->sk_security;
+		if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
+			return NF_DROP;
+		/* At this point, if the returned skb peerlbl is SECSID_NULL
+		 * and the packet has been through at least one XFRM
+		 * transformation, then we must be dealing with the "final"
+		 * form of a labeled IPsec packet; since we've already applied
+		 * all of our access controls on this packet we can safely
+		 * pass the packet. */
+		if (skb_sid == SECSID_NULL) {
+			switch (family) {
+			case PF_INET:
+				if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+					return NF_ACCEPT;
+				break;
+			case PF_INET6:
+				if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+					return NF_ACCEPT;
+				break;
+			default:
+				return NF_DROP_ERR(-ECONNREFUSED);
+			}
+		}
+		if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
+			return NF_DROP;
+		secmark_perm = PACKET__SEND;
+	} else {
+		/* Locally generated packet, fetch the security label from the
+		 * associated socket. */
+		struct sk_security_struct *sksec = sk->sk_security;
+		peer_sid = sksec->sid;
+		secmark_perm = PACKET__SEND;
+	}
+
+	ad.type = LSM_AUDIT_DATA_NET;
+	ad.u.net = &net;
+	ad.u.net->netif = ifindex;
+	ad.u.net->family = family;
+	if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
+		return NF_DROP;
+
+	if (secmark_active)
+		if (avc_has_perm(peer_sid, skb->secmark,
+				 SECCLASS_PACKET, secmark_perm, &ad))
+			return NF_DROP_ERR(-ECONNREFUSED);
+
+	if (peerlbl_active) {
+		u32 if_sid;
+		u32 node_sid;
+
+		if (sel_netif_sid(dev_net(outdev), ifindex, &if_sid))
+			return NF_DROP;
+		if (avc_has_perm(peer_sid, if_sid,
+				 SECCLASS_NETIF, NETIF__EGRESS, &ad))
+			return NF_DROP_ERR(-ECONNREFUSED);
+
+		if (sel_netnode_sid(addrp, family, &node_sid))
+			return NF_DROP;
+		if (avc_has_perm(peer_sid, node_sid,
+				 SECCLASS_NODE, NODE__SENDTO, &ad))
+			return NF_DROP_ERR(-ECONNREFUSED);
+	}
+
+	return NF_ACCEPT;
+}
+
+static unsigned int selinux_ipv4_postroute(void *priv,
+					   struct sk_buff *skb,
+					   const struct nf_hook_state *state)
+{
+	return selinux_ip_postroute(skb, state->out, PF_INET);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static unsigned int selinux_ipv6_postroute(void *priv,
+					   struct sk_buff *skb,
+					   const struct nf_hook_state *state)
+{
+	return selinux_ip_postroute(skb, state->out, PF_INET6);
+}
+#endif	/* IPV6 */
+
+#endif	/* CONFIG_NETFILTER */
+
+static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+	return selinux_nlmsg_perm(sk, skb);
+}
+
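+/* Allocate an IPC security blob of class @sclass, labeled with @task's SID. */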
+static int ipc_alloc_security(struct task_struct *task,
+			      struct kern_ipc_perm *perm,
+			      u16 sclass)
+{
+	struct ipc_security_struct *isec;
+	u32 sid;
+
+	isec = kzalloc(sizeof(struct ipc_security_struct), GFP_KERNEL);
+	if (!isec)
+		return -ENOMEM;
+
+	sid = task_sid(task);
+	isec->sclass = sclass;
+	isec->sid = sid;
+	perm->security = isec;
+
+	return 0;
+}
+
+static void ipc_free_security(struct kern_ipc_perm *perm)
+{
+	struct ipc_security_struct *isec = perm->security;
+	perm->security = NULL;
+	kfree(isec);
+}
+
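+/* Allocate a message security blob; the message starts out unlabeled and
+ * receives its real label on the first send (see selinux_msg_queue_msgsnd). */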
+static int msg_msg_alloc_security(struct msg_msg *msg)
+{
+	struct msg_security_struct *msec;
+
+	msec = kzalloc(sizeof(struct msg_security_struct), GFP_KERNEL);
+	if (!msec)
+		return -ENOMEM;
+
+	msec->sid = SECINITSID_UNLABELED;
+	msg->security = msec;
+
+	return 0;
+}
+
+static void msg_msg_free_security(struct msg_msg *msg)
+{
+	struct msg_security_struct *msec = msg->security;
+
+	msg->security = NULL;
+	kfree(msec);
+}
+
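+/* Common permission check between the current task and an IPC object. */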
+static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
+			u32 perms)
+{
+	struct ipc_security_struct *isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+
+	isec = ipc_perms->security;
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = ipc_perms->key;
+
+	return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
+}
+
+static int selinux_msg_msg_alloc_security(struct msg_msg *msg)
+{
+	return msg_msg_alloc_security(msg);
+}
+
+static void selinux_msg_msg_free_security(struct msg_msg *msg)
+{
+	msg_msg_free_security(msg);
+}
+
+/* message queue security operations */
+static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
+{
+	struct ipc_security_struct *isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+	int rc;
+
+	rc = ipc_alloc_security(current, &msq->q_perm, SECCLASS_MSGQ);
+	if (rc)
+		return rc;
+
+	isec = msq->q_perm.security;
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = msq->q_perm.key;
+
+	rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
+			  MSGQ__CREATE, &ad);
+	if (rc) {
+		ipc_free_security(&msq->q_perm);
+		return rc;
+	}
+	return 0;
+}
+
+static void selinux_msg_queue_free_security(struct msg_queue *msq)
+{
+	ipc_free_security(&msq->q_perm);
+}
+
+static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg)
+{
+	struct ipc_security_struct *isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+
+	isec = msq->q_perm.security;
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = msq->q_perm.key;
+
+	return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
+			    MSGQ__ASSOCIATE, &ad);
+}
+
+static int selinux_msg_queue_msgctl(struct msg_queue *msq, int cmd)
+{
+	int err;
+	int perms;
+
+	switch (cmd) {
+	case IPC_INFO:
+	case MSG_INFO:
+		/* No specific object, just general system-wide information. */
+		return task_has_system(current, SYSTEM__IPC_INFO);
+	case IPC_STAT:
+	case MSG_STAT:
+		perms = MSGQ__GETATTR | MSGQ__ASSOCIATE;
+		break;
+	case IPC_SET:
+		perms = MSGQ__SETATTR;
+		break;
+	case IPC_RMID:
+		perms = MSGQ__DESTROY;
+		break;
+	default:
+		return 0;
+	}
+
+	err = ipc_has_perm(&msq->q_perm, perms);
+	return err;
+}
+
+static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, int msqflg)
+{
+	struct ipc_security_struct *isec;
+	struct msg_security_struct *msec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+	int rc;
+
+	isec = msq->q_perm.security;
+	msec = msg->security;
+
+	/*
+	 * First time through; we need to assign a label to the message
+	 */
+	if (msec->sid == SECINITSID_UNLABELED) {
+		/*
+		 * Compute new sid based on current process and
+		 * message queue this message will be stored in
+		 */
+		rc = security_transition_sid(sid, isec->sid, SECCLASS_MSG,
+					     NULL, &msec->sid);
+		if (rc)
+			return rc;
+	}
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = msq->q_perm.key;
+
+	/* Can this process write to the queue? */
+	rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
+			  MSGQ__WRITE, &ad);
+	if (!rc)
+		/* Can this process send the message? */
+		rc = avc_has_perm(sid, msec->sid, SECCLASS_MSG,
+				  MSG__SEND, &ad);
+	if (!rc)
+		/* Can the message be put in the queue? */
+		rc = avc_has_perm(msec->sid, isec->sid, SECCLASS_MSGQ,
+				  MSGQ__ENQUEUE, &ad);
+
+	return rc;
+}
+
+static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
+				    struct task_struct *target,
+				    long type, int mode)
+{
+	struct ipc_security_struct *isec;
+	struct msg_security_struct *msec;
+	struct common_audit_data ad;
+	u32 sid = task_sid(target);
+	int rc;
+
+	isec = msq->q_perm.security;
+	msec = msg->security;
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = msq->q_perm.key;
+
+	rc = avc_has_perm(sid, isec->sid,
+			  SECCLASS_MSGQ, MSGQ__READ, &ad);
+	if (!rc)
+		rc = avc_has_perm(sid, msec->sid,
+				  SECCLASS_MSG, MSG__RECEIVE, &ad);
+	return rc;
+}
+
+/* Shared Memory security operations */
+static int selinux_shm_alloc_security(struct shmid_kernel *shp)
+{
+	struct ipc_security_struct *isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+	int rc;
+
+	rc = ipc_alloc_security(current, &shp->shm_perm, SECCLASS_SHM);
+	if (rc)
+		return rc;
+
+	isec = shp->shm_perm.security;
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = shp->shm_perm.key;
+
+	rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
+			  SHM__CREATE, &ad);
+	if (rc) {
+		ipc_free_security(&shp->shm_perm);
+		return rc;
+	}
+	return 0;
+}
+
+static void selinux_shm_free_security(struct shmid_kernel *shp)
+{
+	ipc_free_security(&shp->shm_perm);
+}
+
+static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg)
+{
+	struct ipc_security_struct *isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+
+	isec = shp->shm_perm.security;
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = shp->shm_perm.key;
+
+	return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
+			    SHM__ASSOCIATE, &ad);
+}
+
+/* Note, at this point, shp is locked down */
+static int selinux_shm_shmctl(struct shmid_kernel *shp, int cmd)
+{
+	int perms;
+	int err;
+
+	switch (cmd) {
+	case IPC_INFO:
+	case SHM_INFO:
+		/* No specific object, just general system-wide information. */
+		return task_has_system(current, SYSTEM__IPC_INFO);
+	case IPC_STAT:
+	case SHM_STAT:
+		perms = SHM__GETATTR | SHM__ASSOCIATE;
+		break;
+	case IPC_SET:
+		perms = SHM__SETATTR;
+		break;
+	case SHM_LOCK:
+	case SHM_UNLOCK:
+		perms = SHM__LOCK;
+		break;
+	case IPC_RMID:
+		perms = SHM__DESTROY;
+		break;
+	default:
+		return 0;
+	}
+
+	err = ipc_has_perm(&shp->shm_perm, perms);
+	return err;
+}
+
+static int selinux_shm_shmat(struct shmid_kernel *shp,
+			     char __user *shmaddr, int shmflg)
+{
+	u32 perms;
+
+	if (shmflg & SHM_RDONLY)
+		perms = SHM__READ;
+	else
+		perms = SHM__READ | SHM__WRITE;
+
+	return ipc_has_perm(&shp->shm_perm, perms);
+}
+
+/* Semaphore security operations */
+static int selinux_sem_alloc_security(struct sem_array *sma)
+{
+	struct ipc_security_struct *isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+	int rc;
+
+	rc = ipc_alloc_security(current, &sma->sem_perm, SECCLASS_SEM);
+	if (rc)
+		return rc;
+
+	isec = sma->sem_perm.security;
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = sma->sem_perm.key;
+
+	rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
+			  SEM__CREATE, &ad);
+	if (rc) {
+		ipc_free_security(&sma->sem_perm);
+		return rc;
+	}
+	return 0;
+}
+
+static void selinux_sem_free_security(struct sem_array *sma)
+{
+	ipc_free_security(&sma->sem_perm);
+}
+
+static int selinux_sem_associate(struct sem_array *sma, int semflg)
+{
+	struct ipc_security_struct *isec;
+	struct common_audit_data ad;
+	u32 sid = current_sid();
+
+	isec = sma->sem_perm.security;
+
+	ad.type = LSM_AUDIT_DATA_IPC;
+	ad.u.ipc_id = sma->sem_perm.key;
+
+	return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
+			    SEM__ASSOCIATE, &ad);
+}
+
+/* Note, at this point, sma is locked down */
+static int selinux_sem_semctl(struct sem_array *sma, int cmd)
+{
+	int err;
+	u32 perms;
+
+	switch (cmd) {
+	case IPC_INFO:
+	case SEM_INFO:
+		/* No specific object, just general system-wide information. */
+		return task_has_system(current, SYSTEM__IPC_INFO);
+	case GETPID:
+	case GETNCNT:
+	case GETZCNT:
+		perms = SEM__GETATTR;
+		break;
+	case GETVAL:
+	case GETALL:
+		perms = SEM__READ;
+		break;
+	case SETVAL:
+	case SETALL:
+		perms = SEM__WRITE;
+		break;
+	case IPC_RMID:
+		perms = SEM__DESTROY;
+		break;
+	case IPC_SET:
+		perms = SEM__SETATTR;
+		break;
+	case IPC_STAT:
+	case SEM_STAT:
+		perms = SEM__GETATTR | SEM__ASSOCIATE;
+		break;
+	default:
+		return 0;
+	}
+
+	err = ipc_has_perm(&sma->sem_perm, perms);
+	return err;
+}
+
+static int selinux_sem_semop(struct sem_array *sma,
+			     struct sembuf *sops, unsigned nsops, int alter)
+{
+	u32 perms;
+
+	if (alter)
+		perms = SEM__READ | SEM__WRITE;
+	else
+		perms = SEM__READ;
+
+	return ipc_has_perm(&sma->sem_perm, perms);
+}
+
+static int selinux_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
+{
+	u32 av = 0;
+
+	if (flag & S_IRUGO)
+		av |= IPC__UNIX_READ;
+	if (flag & S_IWUGO)
+		av |= IPC__UNIX_WRITE;
+
+	if (av == 0)
+		return 0;
+
+	return ipc_has_perm(ipcp, av);
+}
+
+static void selinux_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+{
+	struct ipc_security_struct *isec = ipcp->security;
+	*secid = isec->sid;
+}
+
+static void selinux_d_instantiate(struct dentry *dentry, struct inode *inode)
+{
+	if (inode)
+		inode_doinit_with_dentry(inode, dentry);
+}
+
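+/* Report one of @p's SELinux attributes (e.g. /proc/pid/attr/current) as
+ * a security context string; reading another task's attributes requires
+ * the process:getattr permission. */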
+static int selinux_getprocattr(struct task_struct *p,
+			       char *name, char **value)
+{
+	const struct task_security_struct *__tsec;
+	u32 sid;
+	int error;
+	unsigned len;
+
+	if (current != p) {
+		error = current_has_perm(p, PROCESS__GETATTR);
+		if (error)
+			return error;
+	}
+
+	rcu_read_lock();
+	__tsec = __task_cred(p)->security;
+
+	if (!strcmp(name, "current"))
+		sid = __tsec->sid;
+	else if (!strcmp(name, "prev"))
+		sid = __tsec->osid;
+	else if (!strcmp(name, "exec"))
+		sid = __tsec->exec_sid;
+	else if (!strcmp(name, "fscreate"))
+		sid = __tsec->create_sid;
+	else if (!strcmp(name, "keycreate"))
+		sid = __tsec->keycreate_sid;
+	else if (!strcmp(name, "sockcreate"))
+		sid = __tsec->sockcreate_sid;
+	else
+		goto invalid;
+	rcu_read_unlock();
+
+	if (!sid)
+		return 0;
+
+	error = security_sid_to_context(sid, value, &len);
+	if (error)
+		return error;
+	return len;
+
+invalid:
+	rcu_read_unlock();
+	return -EINVAL;
+}
+
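+/* Update one of the current task's SELinux attributes from a security
+ * context string; a task may only change its own attributes. */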
+static int selinux_setprocattr(struct task_struct *p,
+			       char *name, void *value, size_t size)
+{
+	struct task_security_struct *tsec;
+	struct task_struct *tracer;
+	struct cred *new;
+	u32 sid = 0, ptsid;
+	int error;
+	char *str = value;
+
+	if (current != p) {
+		/* SELinux only allows a process to change its own
+		   security attributes. */
+		return -EACCES;
+	}
+
+	/*
+	 * Basic control over ability to set these attributes at all.
+	 * current == p, but we'll pass them separately in case the
+	 * above restriction is ever removed.
+	 */
+	if (!strcmp(name, "exec"))
+		error = current_has_perm(p, PROCESS__SETEXEC);
+	else if (!strcmp(name, "fscreate"))
+		error = current_has_perm(p, PROCESS__SETFSCREATE);
+	else if (!strcmp(name, "keycreate"))
+		error = current_has_perm(p, PROCESS__SETKEYCREATE);
+	else if (!strcmp(name, "sockcreate"))
+		error = current_has_perm(p, PROCESS__SETSOCKCREATE);
+	else if (!strcmp(name, "current"))
+		error = current_has_perm(p, PROCESS__SETCURRENT);
+	else
+		error = -EINVAL;
+	if (error)
+		return error;
+
+	/* Obtain a SID for the context, if one was specified. */
+	if (size && str[0] && str[0] != '\n') {
+		if (str[size-1] == '\n') {
+			str[size-1] = 0;
+			size--;
+		}
+		error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
+		if (error == -EINVAL && !strcmp(name, "fscreate")) {
+			if (!capable(CAP_MAC_ADMIN)) {
+				struct audit_buffer *ab;
+				size_t audit_size;
+
+				/* We strip a nul only if it is at the end; otherwise the
+				 * context contains a nul and we should audit that */
+				if (str[size - 1] == '\0')
+					audit_size = size - 1;
+				else
+					audit_size = size;
+				ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
+				audit_log_format(ab, "op=fscreate invalid_context=");
+				audit_log_n_untrustedstring(ab, value, audit_size);
+				audit_log_end(ab);
+
+				return error;
+			}
+			error = security_context_to_sid_force(value, size,
+							      &sid);
+		}
+		if (error)
+			return error;
+	}
+
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
+
+	/* Permission checking based on the specified context is
+	   performed during the actual operation (execve,
+	   open/mkdir/...), when we know the full context of the
+	   operation.  See selinux_bprm_set_creds for the execve
+	   checks and may_create for the file creation checks. The
+	   operation will then fail if the context is not permitted. */
+	tsec = new->security;
+	if (!strcmp(name, "exec")) {
+		tsec->exec_sid = sid;
+	} else if (!strcmp(name, "fscreate")) {
+		tsec->create_sid = sid;
+	} else if (!strcmp(name, "keycreate")) {
+		error = may_create_key(sid, p);
+		if (error)
+			goto abort_change;
+		tsec->keycreate_sid = sid;
+	} else if (!strcmp(name, "sockcreate")) {
+		tsec->sockcreate_sid = sid;
+	} else if (!strcmp(name, "current")) {
+		error = -EINVAL;
+		if (sid == 0)
+			goto abort_change;
+
+		/* Only allow single threaded processes to change context
+		 * freely; a multithreaded process may only make a bounded
+		 * transition. */
+		error = -EPERM;
+		if (!current_is_single_threaded()) {
+			error = security_bounded_transition(tsec->sid, sid);
+			if (error)
+				goto abort_change;
+		}
+
+		/* Check permissions for the transition. */
+		error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS,
+				     PROCESS__DYNTRANSITION, NULL);
+		if (error)
+			goto abort_change;
+
+		/* Check for ptracing, and update the task SID if ok.
+		   Otherwise, leave SID unchanged and fail. */
+		ptsid = 0;
+		rcu_read_lock();
+		tracer = ptrace_parent(p);
+		if (tracer)
+			ptsid = task_sid(tracer);
+		rcu_read_unlock();
+
+		if (tracer) {
+			error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
+					     PROCESS__PTRACE, NULL);
+			if (error)
+				goto abort_change;
+		}
+
+		tsec->sid = sid;
+	} else {
+		error = -EINVAL;
+		goto abort_change;
+	}
+
+	commit_creds(new);
+	return size;
+
+abort_change:
+	abort_creds(new);
+	return error;
+}
+
+static int selinux_ismaclabel(const char *name)
+{
+	return (strcmp(name, XATTR_SELINUX_SUFFIX) == 0);
+}
+
+static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+	return security_sid_to_context(secid, secdata, seclen);
+}
+
+static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
+{
+	return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL);
+}
+
+static void selinux_release_secctx(char *secdata, u32 seclen)
+{
+	kfree(secdata);
+}
+
+/*
+ *	called with inode->i_mutex locked
+ */
+static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+	return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
+}
+
+/*
+ *	called with inode->i_mutex locked
+ */
+static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+	return __vfs_setxattr_noperm(dentry, XATTR_NAME_SELINUX, ctx, ctxlen, 0);
+}
+
+static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+	int len = 0;
+	len = selinux_inode_getsecurity(inode, XATTR_SELINUX_SUFFIX,
+						ctx, true);
+	if (len < 0)
+		return len;
+	*ctxlen = len;
+	return 0;
+}
+#ifdef CONFIG_KEYS
+
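+/* Label a new key with the creating task's keycreate SID, falling back
+ * to the task SID when no keycreate context has been set. */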
+static int selinux_key_alloc(struct key *k, const struct cred *cred,
+			     unsigned long flags)
+{
+	const struct task_security_struct *tsec;
+	struct key_security_struct *ksec;
+
+	ksec = kzalloc(sizeof(struct key_security_struct), GFP_KERNEL);
+	if (!ksec)
+		return -ENOMEM;
+
+	tsec = cred->security;
+	if (tsec->keycreate_sid)
+		ksec->sid = tsec->keycreate_sid;
+	else
+		ksec->sid = tsec->sid;
+
+	k->security = ksec;
+	return 0;
+}
+
+static void selinux_key_free(struct key *k)
+{
+	struct key_security_struct *ksec = k->security;
+
+	k->security = NULL;
+	kfree(ksec);
+}
+
+static int selinux_key_permission(key_ref_t key_ref,
+				  const struct cred *cred,
+				  unsigned perm)
+{
+	struct key *key;
+	struct key_security_struct *ksec;
+	u32 sid;
+
+	/* if no specific permissions are requested, we skip the
+	   permission check. No serious additional covert channels
+	   appear to be created. */
+	if (perm == 0)
+		return 0;
+
+	sid = cred_sid(cred);
+
+	key = key_ref_to_ptr(key_ref);
+	ksec = key->security;
+
+	return avc_has_perm(sid, ksec->sid, SECCLASS_KEY, perm, NULL);
+}
+
+static int selinux_key_getsecurity(struct key *key, char **_buffer)
+{
+	struct key_security_struct *ksec = key->security;
+	char *context = NULL;
+	unsigned len;
+	int rc;
+
+	rc = security_sid_to_context(ksec->sid, &context, &len);
+	if (!rc)
+		rc = len;
+	*_buffer = context;
+	return rc;
+}
+
+#endif
+
+static struct security_hook_list selinux_hooks[] = {
+	LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
+	LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
+	LSM_HOOK_INIT(binder_transfer_binder, selinux_binder_transfer_binder),
+	LSM_HOOK_INIT(binder_transfer_file, selinux_binder_transfer_file),
+
+	LSM_HOOK_INIT(ptrace_access_check, selinux_ptrace_access_check),
+	LSM_HOOK_INIT(ptrace_traceme, selinux_ptrace_traceme),
+	LSM_HOOK_INIT(capget, selinux_capget),
+	LSM_HOOK_INIT(capset, selinux_capset),
+	LSM_HOOK_INIT(capable, selinux_capable),
+	LSM_HOOK_INIT(quotactl, selinux_quotactl),
+	LSM_HOOK_INIT(quota_on, selinux_quota_on),
+	LSM_HOOK_INIT(syslog, selinux_syslog),
+	LSM_HOOK_INIT(vm_enough_memory, selinux_vm_enough_memory),
+
+	LSM_HOOK_INIT(netlink_send, selinux_netlink_send),
+
+	LSM_HOOK_INIT(bprm_set_creds, selinux_bprm_set_creds),
+	LSM_HOOK_INIT(bprm_committing_creds, selinux_bprm_committing_creds),
+	LSM_HOOK_INIT(bprm_committed_creds, selinux_bprm_committed_creds),
+	LSM_HOOK_INIT(bprm_secureexec, selinux_bprm_secureexec),
+
+	LSM_HOOK_INIT(sb_alloc_security, selinux_sb_alloc_security),
+	LSM_HOOK_INIT(sb_free_security, selinux_sb_free_security),
+	LSM_HOOK_INIT(sb_copy_data, selinux_sb_copy_data),
+	LSM_HOOK_INIT(sb_remount, selinux_sb_remount),
+	LSM_HOOK_INIT(sb_kern_mount, selinux_sb_kern_mount),
+	LSM_HOOK_INIT(sb_show_options, selinux_sb_show_options),
+	LSM_HOOK_INIT(sb_statfs, selinux_sb_statfs),
+	LSM_HOOK_INIT(sb_mount, selinux_mount),
+	LSM_HOOK_INIT(sb_umount, selinux_umount),
+	LSM_HOOK_INIT(sb_set_mnt_opts, selinux_set_mnt_opts),
+	LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
+	LSM_HOOK_INIT(sb_parse_opts_str, selinux_parse_opts_str),
+
+	LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
+
+	LSM_HOOK_INIT(inode_alloc_security, selinux_inode_alloc_security),
+	LSM_HOOK_INIT(inode_free_security, selinux_inode_free_security),
+	LSM_HOOK_INIT(inode_init_security, selinux_inode_init_security),
+	LSM_HOOK_INIT(inode_create, selinux_inode_create),
+	LSM_HOOK_INIT(inode_link, selinux_inode_link),
+	LSM_HOOK_INIT(inode_unlink, selinux_inode_unlink),
+	LSM_HOOK_INIT(inode_symlink, selinux_inode_symlink),
+	LSM_HOOK_INIT(inode_mkdir, selinux_inode_mkdir),
+	LSM_HOOK_INIT(inode_rmdir, selinux_inode_rmdir),
+	LSM_HOOK_INIT(inode_mknod, selinux_inode_mknod),
+	LSM_HOOK_INIT(inode_rename, selinux_inode_rename),
+	LSM_HOOK_INIT(inode_readlink, selinux_inode_readlink),
+	LSM_HOOK_INIT(inode_follow_link, selinux_inode_follow_link),
+	LSM_HOOK_INIT(inode_permission, selinux_inode_permission),
+	LSM_HOOK_INIT(inode_setattr, selinux_inode_setattr),
+	LSM_HOOK_INIT(inode_getattr, selinux_inode_getattr),
+	LSM_HOOK_INIT(inode_setxattr, selinux_inode_setxattr),
+	LSM_HOOK_INIT(inode_post_setxattr, selinux_inode_post_setxattr),
+	LSM_HOOK_INIT(inode_getxattr, selinux_inode_getxattr),
+	LSM_HOOK_INIT(inode_listxattr, selinux_inode_listxattr),
+	LSM_HOOK_INIT(inode_removexattr, selinux_inode_removexattr),
+	LSM_HOOK_INIT(inode_getsecurity, selinux_inode_getsecurity),
+	LSM_HOOK_INIT(inode_setsecurity, selinux_inode_setsecurity),
+	LSM_HOOK_INIT(inode_listsecurity, selinux_inode_listsecurity),
+	LSM_HOOK_INIT(inode_getsecid, selinux_inode_getsecid),
+
+	LSM_HOOK_INIT(file_permission, selinux_file_permission),
+	LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
+	LSM_HOOK_INIT(file_free_security, selinux_file_free_security),
+	LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
+	LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
+	LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
+	LSM_HOOK_INIT(file_mprotect, selinux_file_mprotect),
+	LSM_HOOK_INIT(file_lock, selinux_file_lock),
+	LSM_HOOK_INIT(file_fcntl, selinux_file_fcntl),
+	LSM_HOOK_INIT(file_set_fowner, selinux_file_set_fowner),
+	LSM_HOOK_INIT(file_send_sigiotask, selinux_file_send_sigiotask),
+	LSM_HOOK_INIT(file_receive, selinux_file_receive),
+
+	LSM_HOOK_INIT(file_open, selinux_file_open),
+
+	LSM_HOOK_INIT(task_create, selinux_task_create),
+	LSM_HOOK_INIT(cred_alloc_blank, selinux_cred_alloc_blank),
+	LSM_HOOK_INIT(cred_free, selinux_cred_free),
+	LSM_HOOK_INIT(cred_prepare, selinux_cred_prepare),
+	LSM_HOOK_INIT(cred_transfer, selinux_cred_transfer),
+	LSM_HOOK_INIT(kernel_act_as, selinux_kernel_act_as),
+	LSM_HOOK_INIT(kernel_create_files_as, selinux_kernel_create_files_as),
+	LSM_HOOK_INIT(kernel_module_request, selinux_kernel_module_request),
+	LSM_HOOK_INIT(task_setpgid, selinux_task_setpgid),
+	LSM_HOOK_INIT(task_getpgid, selinux_task_getpgid),
+	LSM_HOOK_INIT(task_getsid, selinux_task_getsid),
+	LSM_HOOK_INIT(task_getsecid, selinux_task_getsecid),
+	LSM_HOOK_INIT(task_setnice, selinux_task_setnice),
+	LSM_HOOK_INIT(task_setioprio, selinux_task_setioprio),
+	LSM_HOOK_INIT(task_getioprio, selinux_task_getioprio),
+	LSM_HOOK_INIT(task_setrlimit, selinux_task_setrlimit),
+	LSM_HOOK_INIT(task_setscheduler, selinux_task_setscheduler),
+	LSM_HOOK_INIT(task_getscheduler, selinux_task_getscheduler),
+	LSM_HOOK_INIT(task_movememory, selinux_task_movememory),
+	LSM_HOOK_INIT(task_kill, selinux_task_kill),
+	LSM_HOOK_INIT(task_wait, selinux_task_wait),
+	LSM_HOOK_INIT(task_to_inode, selinux_task_to_inode),
+
+	LSM_HOOK_INIT(ipc_permission, selinux_ipc_permission),
+	LSM_HOOK_INIT(ipc_getsecid, selinux_ipc_getsecid),
+
+	LSM_HOOK_INIT(msg_msg_alloc_security, selinux_msg_msg_alloc_security),
+	LSM_HOOK_INIT(msg_msg_free_security, selinux_msg_msg_free_security),
+
+	LSM_HOOK_INIT(msg_queue_alloc_security,
+			selinux_msg_queue_alloc_security),
+	LSM_HOOK_INIT(msg_queue_free_security, selinux_msg_queue_free_security),
+	LSM_HOOK_INIT(msg_queue_associate, selinux_msg_queue_associate),
+	LSM_HOOK_INIT(msg_queue_msgctl, selinux_msg_queue_msgctl),
+	LSM_HOOK_INIT(msg_queue_msgsnd, selinux_msg_queue_msgsnd),
+	LSM_HOOK_INIT(msg_queue_msgrcv, selinux_msg_queue_msgrcv),
+
+	LSM_HOOK_INIT(shm_alloc_security, selinux_shm_alloc_security),
+	LSM_HOOK_INIT(shm_free_security, selinux_shm_free_security),
+	LSM_HOOK_INIT(shm_associate, selinux_shm_associate),
+	LSM_HOOK_INIT(shm_shmctl, selinux_shm_shmctl),
+	LSM_HOOK_INIT(shm_shmat, selinux_shm_shmat),
+
+	LSM_HOOK_INIT(sem_alloc_security, selinux_sem_alloc_security),
+	LSM_HOOK_INIT(sem_free_security, selinux_sem_free_security),
+	LSM_HOOK_INIT(sem_associate, selinux_sem_associate),
+	LSM_HOOK_INIT(sem_semctl, selinux_sem_semctl),
+	LSM_HOOK_INIT(sem_semop, selinux_sem_semop),
+
+	LSM_HOOK_INIT(d_instantiate, selinux_d_instantiate),
+
+	LSM_HOOK_INIT(getprocattr, selinux_getprocattr),
+	LSM_HOOK_INIT(setprocattr, selinux_setprocattr),
+
+	LSM_HOOK_INIT(ismaclabel, selinux_ismaclabel),
+	LSM_HOOK_INIT(secid_to_secctx, selinux_secid_to_secctx),
+	LSM_HOOK_INIT(secctx_to_secid, selinux_secctx_to_secid),
+	LSM_HOOK_INIT(release_secctx, selinux_release_secctx),
+	LSM_HOOK_INIT(inode_notifysecctx, selinux_inode_notifysecctx),
+	LSM_HOOK_INIT(inode_setsecctx, selinux_inode_setsecctx),
+	LSM_HOOK_INIT(inode_getsecctx, selinux_inode_getsecctx),
+
+	LSM_HOOK_INIT(unix_stream_connect, selinux_socket_unix_stream_connect),
+	LSM_HOOK_INIT(unix_may_send, selinux_socket_unix_may_send),
+
+	LSM_HOOK_INIT(socket_create, selinux_socket_create),
+	LSM_HOOK_INIT(socket_post_create, selinux_socket_post_create),
+	LSM_HOOK_INIT(socket_bind, selinux_socket_bind),
+	LSM_HOOK_INIT(socket_connect, selinux_socket_connect),
+	LSM_HOOK_INIT(socket_listen, selinux_socket_listen),
+	LSM_HOOK_INIT(socket_accept, selinux_socket_accept),
+	LSM_HOOK_INIT(socket_sendmsg, selinux_socket_sendmsg),
+	LSM_HOOK_INIT(socket_recvmsg, selinux_socket_recvmsg),
+	LSM_HOOK_INIT(socket_getsockname, selinux_socket_getsockname),
+	LSM_HOOK_INIT(socket_getpeername, selinux_socket_getpeername),
+	LSM_HOOK_INIT(socket_getsockopt, selinux_socket_getsockopt),
+	LSM_HOOK_INIT(socket_setsockopt, selinux_socket_setsockopt),
+	LSM_HOOK_INIT(socket_shutdown, selinux_socket_shutdown),
+	LSM_HOOK_INIT(socket_sock_rcv_skb, selinux_socket_sock_rcv_skb),
+	LSM_HOOK_INIT(socket_getpeersec_stream,
+			selinux_socket_getpeersec_stream),
+	LSM_HOOK_INIT(socket_getpeersec_dgram, selinux_socket_getpeersec_dgram),
+	LSM_HOOK_INIT(sk_alloc_security, selinux_sk_alloc_security),
+	LSM_HOOK_INIT(sk_free_security, selinux_sk_free_security),
+	LSM_HOOK_INIT(sk_clone_security, selinux_sk_clone_security),
+	LSM_HOOK_INIT(sk_getsecid, selinux_sk_getsecid),
+	LSM_HOOK_INIT(sock_graft, selinux_sock_graft),
+	LSM_HOOK_INIT(inet_conn_request, selinux_inet_conn_request),
+	LSM_HOOK_INIT(inet_csk_clone, selinux_inet_csk_clone),
+	LSM_HOOK_INIT(inet_conn_established, selinux_inet_conn_established),
+	LSM_HOOK_INIT(secmark_relabel_packet, selinux_secmark_relabel_packet),
+	LSM_HOOK_INIT(secmark_refcount_inc, selinux_secmark_refcount_inc),
+	LSM_HOOK_INIT(secmark_refcount_dec, selinux_secmark_refcount_dec),
+	LSM_HOOK_INIT(req_classify_flow, selinux_req_classify_flow),
+	LSM_HOOK_INIT(tun_dev_alloc_security, selinux_tun_dev_alloc_security),
+	LSM_HOOK_INIT(tun_dev_free_security, selinux_tun_dev_free_security),
+	LSM_HOOK_INIT(tun_dev_create, selinux_tun_dev_create),
+	LSM_HOOK_INIT(tun_dev_attach_queue, selinux_tun_dev_attach_queue),
+	LSM_HOOK_INIT(tun_dev_attach, selinux_tun_dev_attach),
+	LSM_HOOK_INIT(tun_dev_open, selinux_tun_dev_open),
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+	LSM_HOOK_INIT(xfrm_policy_alloc_security, selinux_xfrm_policy_alloc),
+	LSM_HOOK_INIT(xfrm_policy_clone_security, selinux_xfrm_policy_clone),
+	LSM_HOOK_INIT(xfrm_policy_free_security, selinux_xfrm_policy_free),
+	LSM_HOOK_INIT(xfrm_policy_delete_security, selinux_xfrm_policy_delete),
+	LSM_HOOK_INIT(xfrm_state_alloc, selinux_xfrm_state_alloc),
+	LSM_HOOK_INIT(xfrm_state_alloc_acquire,
+			selinux_xfrm_state_alloc_acquire),
+	LSM_HOOK_INIT(xfrm_state_free_security, selinux_xfrm_state_free),
+	LSM_HOOK_INIT(xfrm_state_delete_security, selinux_xfrm_state_delete),
+	LSM_HOOK_INIT(xfrm_policy_lookup, selinux_xfrm_policy_lookup),
+	LSM_HOOK_INIT(xfrm_state_pol_flow_match,
+			selinux_xfrm_state_pol_flow_match),
+	LSM_HOOK_INIT(xfrm_decode_session, selinux_xfrm_decode_session),
+#endif
+
+#ifdef CONFIG_KEYS
+	LSM_HOOK_INIT(key_alloc, selinux_key_alloc),
+	LSM_HOOK_INIT(key_free, selinux_key_free),
+	LSM_HOOK_INIT(key_permission, selinux_key_permission),
+	LSM_HOOK_INIT(key_getsecurity, selinux_key_getsecurity),
+#endif
+
+#ifdef CONFIG_AUDIT
+	LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
+	LSM_HOOK_INIT(audit_rule_known, selinux_audit_rule_known),
+	LSM_HOOK_INIT(audit_rule_match, selinux_audit_rule_match),
+	LSM_HOOK_INIT(audit_rule_free, selinux_audit_rule_free),
+#endif
+};
+
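+/* Register the SELinux hooks and initialize core state; this runs early
+ * via security_initcall() so that all processes and objects can be
+ * labeled as they are created. */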
+static __init int selinux_init(void)
+{
+	if (!security_module_enable("selinux")) {
+		selinux_enabled = 0;
+		return 0;
+	}
+
+	if (!selinux_enabled) {
+		printk(KERN_INFO "SELinux:  Disabled at boot.\n");
+		return 0;
+	}
+
+	printk(KERN_INFO "SELinux:  Initializing.\n");
+
+	/* Set the security state for the initial task. */
+	cred_init_security();
+
+	default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC);
+
+	sel_inode_cache = kmem_cache_create("selinux_inode_security",
+					    sizeof(struct inode_security_struct),
+					    0, SLAB_PANIC, NULL);
+	file_security_cache = kmem_cache_create("selinux_file_security",
+					    sizeof(struct file_security_struct),
+					    0, SLAB_PANIC, NULL);
+	avc_init();
+
+	security_add_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks));
+
+	if (avc_add_callback(selinux_netcache_avc_callback, AVC_CALLBACK_RESET))
+		panic("SELinux: Unable to register AVC netcache callback\n");
+
+	if (selinux_enforcing)
+		printk(KERN_DEBUG "SELinux:  Starting in enforcing mode\n");
+	else
+		printk(KERN_DEBUG "SELinux:  Starting in permissive mode\n");
+
+	return 0;
+}
+
+static void delayed_superblock_init(struct super_block *sb, void *unused)
+{
+	superblock_doinit(sb, NULL);
+}
+
+void selinux_complete_init(void)
+{
+	printk(KERN_DEBUG "SELinux:  Completing initialization.\n");
+
+	/* Set up any superblocks initialized prior to the policy load. */
+	printk(KERN_DEBUG "SELinux:  Setting up existing superblocks.\n");
+	iterate_supers(delayed_superblock_init, NULL);
+}
+
+/* SELinux requires early initialization in order to label
+   all processes and objects when they are created. */
+security_initcall(selinux_init);
+
+#if defined(CONFIG_NETFILTER)
+
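+/* Netfilter hook registrations: SELinux runs last in POST_ROUTING and
+ * first in FORWARD and LOCAL_OUT, for both IPv4 and IPv6. */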
+static struct nf_hook_ops selinux_nf_ops[] = {
+	{
+		.hook =		selinux_ipv4_postroute,
+		.pf =		NFPROTO_IPV4,
+		.hooknum =	NF_INET_POST_ROUTING,
+		.priority =	NF_IP_PRI_SELINUX_LAST,
+	},
+	{
+		.hook =		selinux_ipv4_forward,
+		.pf =		NFPROTO_IPV4,
+		.hooknum =	NF_INET_FORWARD,
+		.priority =	NF_IP_PRI_SELINUX_FIRST,
+	},
+	{
+		.hook =		selinux_ipv4_output,
+		.pf =		NFPROTO_IPV4,
+		.hooknum =	NF_INET_LOCAL_OUT,
+		.priority =	NF_IP_PRI_SELINUX_FIRST,
+	},
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	{
+		.hook =		selinux_ipv6_postroute,
+		.pf =		NFPROTO_IPV6,
+		.hooknum =	NF_INET_POST_ROUTING,
+		.priority =	NF_IP6_PRI_SELINUX_LAST,
+	},
+	{
+		.hook =		selinux_ipv6_forward,
+		.pf =		NFPROTO_IPV6,
+		.hooknum =	NF_INET_FORWARD,
+		.priority =	NF_IP6_PRI_SELINUX_FIRST,
+	},
+#endif	/* IPV6 */
+};
+
+static int __init selinux_nf_ip_init(void)
+{
+	int err;
+
+	if (!selinux_enabled)
+		return 0;
+
+	printk(KERN_DEBUG "SELinux:  Registering netfilter hooks\n");
+
+	err = nf_register_hooks(selinux_nf_ops, ARRAY_SIZE(selinux_nf_ops));
+	if (err)
+		panic("SELinux: nf_register_hooks: error %d\n", err);
+
+	return 0;
+}
+
+__initcall(selinux_nf_ip_init);
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+static void selinux_nf_ip_exit(void)
+{
+	printk(KERN_DEBUG "SELinux:  Unregistering netfilter hooks\n");
+
+	nf_unregister_hooks(selinux_nf_ops, ARRAY_SIZE(selinux_nf_ops));
+}
+#endif
+
+#else /* CONFIG_NETFILTER */
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+#define selinux_nf_ip_exit()
+#endif
+
+#endif /* CONFIG_NETFILTER */
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+static int selinux_disabled;
+
+int selinux_disable(void)
+{
+	if (ss_initialized) {
+		/* Not permitted after initial policy load. */
+		return -EINVAL;
+	}
+
+	if (selinux_disabled) {
+		/* Only do this once. */
+		return -EINVAL;
+	}
+
+	printk(KERN_INFO "SELinux:  Disabled at runtime.\n");
+
+	selinux_disabled = 1;
+	selinux_enabled = 0;
+
+	security_delete_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks));
+
+	/* Try to destroy the avc node cache */
+	avc_disable();
+
+	/* Unregister netfilter hooks. */
+	selinux_nf_ip_exit();
+
+	/* Unregister selinuxfs. */
+	exit_sel_fs();
+
+	return 0;
+}
+#endif
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
new file mode 100644
index 0000000..1bdf973
--- /dev/null
+++ b/security/selinux/include/audit.h
@@ -0,0 +1,65 @@
+/*
+ * SELinux support for the Audit LSM hooks
+ *
+ * Most of the header below was moved from include/linux/selinux.h,
+ * which was released under the following copyrights:
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _SELINUX_AUDIT_H
+#define _SELINUX_AUDIT_H
+
+/**
+ *	selinux_audit_rule_init - alloc/init an SELinux audit rule structure.
+ *	@field: the field this rule refers to
+ *	@op: the operator the rule uses
+ *	@rulestr: the text "target" of the rule
+ *	@rule: pointer to the new rule structure returned via this parameter
+ *
+ *	Returns 0 if successful, -errno if not.  On success, the rule structure
+ *	will be allocated internally.  The caller must free this structure with
+ *	selinux_audit_rule_free() after use.
+ */
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
+
+/**
+ *	selinux_audit_rule_free - free an SELinux audit rule structure.
+ *	@rule: pointer to the audit rule to be freed
+ *
+ *	This will free all memory associated with the given rule.
+ *	If @rule is NULL, no operation is performed.
+ */
+void selinux_audit_rule_free(void *rule);
+
+/**
+ *	selinux_audit_rule_match - determine if a context ID matches a rule.
+ *	@sid: the context ID to check
+ *	@field: the field this rule refers to
+ *	@op: the operator the rule uses
+ *	@rule: pointer to the audit rule to check against
+ *	@actx: the audit context (can be NULL) associated with the check
+ *
+ *	Returns 1 if the context ID matches the rule, 0 if it does not, and
+ *	-errno on failure.
+ */
+int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *rule,
+			     struct audit_context *actx);
+
+/**
+ *	selinux_audit_rule_known - check whether a rule contains SELinux fields.
+ *	@krule: the audit rule to be checked
+ *	Returns 1 if there are SELinux fields specified in the rule, 0 otherwise.
+ */
+int selinux_audit_rule_known(struct audit_krule *krule);
+
+#endif /* _SELINUX_AUDIT_H */
+
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
new file mode 100644
index 0000000..0999df0
--- /dev/null
+++ b/security/selinux/include/avc.h
@@ -0,0 +1,190 @@
+/*
+ * Access vector cache interface for object managers.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SELINUX_AVC_H_
+#define _SELINUX_AVC_H_
+
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/audit.h>
+#include <linux/lsm_audit.h>
+#include <linux/in6.h>
+#include "flask.h"
+#include "av_permissions.h"
+#include "security.h"
+
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+extern int selinux_enforcing;
+#else
+#define selinux_enforcing 1
+#endif
+
+/*
+ * An entry in the AVC.
+ */
+struct avc_entry;
+
+struct task_struct;
+struct inode;
+struct sock;
+struct sk_buff;
+
+/*
+ * AVC statistics
+ */
+struct avc_cache_stats {
+	unsigned int lookups;
+	unsigned int misses;
+	unsigned int allocations;
+	unsigned int reclaims;
+	unsigned int frees;
+};
+
+/*
+ * We only need this data after we have decided to send an audit message.
+ */
+struct selinux_audit_data {
+	u32 ssid;
+	u32 tsid;
+	u16 tclass;
+	u32 requested;
+	u32 audited;
+	u32 denied;
+	int result;
+};
+
+/*
+ * AVC operations
+ */
+
+void __init avc_init(void);
+
+static inline u32 avc_audit_required(u32 requested,
+			      struct av_decision *avd,
+			      int result,
+			      u32 auditdeny,
+			      u32 *deniedp)
+{
+	u32 denied, audited;
+	denied = requested & ~avd->allowed;
+	if (unlikely(denied)) {
+		audited = denied & avd->auditdeny;
+		/*
+		 * auditdeny is TRICKY!  Setting a bit in
+		 * this field means that ANY denials should NOT be audited if
+		 * the policy contains an explicit dontaudit rule for that
+		 * permission.  Take notice that this is unrelated to the
+		 * actual permissions that were denied.  As an example, let's
+		 * assume:
+		 *
+		 * denied == READ
+		 * avd.auditdeny & ACCESS == 0 (not set means explicit rule)
+		 * auditdeny & ACCESS == 1
+		 *
+		 * We will NOT audit the denial even though the denied
+		 * permission was READ and the auditdeny checks were for
+		 * ACCESS.
+		 */
+		if (auditdeny && !(auditdeny & avd->auditdeny))
+			audited = 0;
+	} else if (result)
+		audited = denied = requested;
+	else
+		audited = requested & avd->auditallow;
+	*deniedp = denied;
+	return audited;
+}
+
+int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
+		   u32 requested, u32 audited, u32 denied, int result,
+		   struct common_audit_data *a,
+		   unsigned flags);
+
+/**
+ * avc_audit - Audit the granting or denial of permissions.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @requested: requested permissions
+ * @avd: access vector decisions
+ * @result: result from avc_has_perm_noaudit
+ * @a:  auxiliary audit data
+ * @flags: VFS walk flags
+ *
+ * Audit the granting or denial of permissions in accordance
+ * with the policy.  This function is typically called by
+ * avc_has_perm() after a permission check, but can also be
+ * called directly by callers who use avc_has_perm_noaudit()
+ * in order to separate the permission check from the auditing.
+ * For example, this separation is useful when the permission check must
+ * be performed under a lock, to allow the lock to be released
+ * before calling the auditing code.
+ */
+static inline int avc_audit(u32 ssid, u32 tsid,
+			    u16 tclass, u32 requested,
+			    struct av_decision *avd,
+			    int result,
+			    struct common_audit_data *a,
+			    int flags)
+{
+	u32 audited, denied;
+	audited = avc_audit_required(requested, avd, result, 0, &denied);
+	if (likely(!audited))
+		return 0;
+	return slow_avc_audit(ssid, tsid, tclass,
+			      requested, audited, denied, result,
+			      a, flags);
+}
+
+#define AVC_STRICT 1 /* Ignore permissive mode. */
+#define AVC_EXTENDED_PERMS 2	/* update extended permissions */
+int avc_has_perm_noaudit(u32 ssid, u32 tsid,
+			 u16 tclass, u32 requested,
+			 unsigned flags,
+			 struct av_decision *avd);
+
+int avc_has_perm(u32 ssid, u32 tsid,
+		 u16 tclass, u32 requested,
+		 struct common_audit_data *auditdata);
+int avc_has_perm_flags(u32 ssid, u32 tsid,
+		       u16 tclass, u32 requested,
+		       struct common_audit_data *auditdata,
+		       int flags);
+
+int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+		u8 driver, u8 perm, struct common_audit_data *ad);
+
+
+u32 avc_policy_seqno(void);
+
+#define AVC_CALLBACK_GRANT		1
+#define AVC_CALLBACK_TRY_REVOKE		2
+#define AVC_CALLBACK_REVOKE		4
+#define AVC_CALLBACK_RESET		8
+#define AVC_CALLBACK_AUDITALLOW_ENABLE	16
+#define AVC_CALLBACK_AUDITALLOW_DISABLE	32
+#define AVC_CALLBACK_AUDITDENY_ENABLE	64
+#define AVC_CALLBACK_AUDITDENY_DISABLE	128
+#define AVC_CALLBACK_ADD_XPERMS		256
+
+int avc_add_callback(int (*callback)(u32 event), u32 events);
+
+/* Exported to selinuxfs */
+int avc_get_hash_stats(char *page);
+extern unsigned int avc_cache_threshold;
+
+/* Attempt to free avc node cache */
+void avc_disable(void);
+
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
+#endif
+
+#endif /* _SELINUX_AVC_H_ */
+
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
new file mode 100644
index 0000000..d5c3284
--- /dev/null
+++ b/security/selinux/include/avc_ss.h
@@ -0,0 +1,28 @@
+/*
+ * Access vector cache interface for the security server.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SELINUX_AVC_SS_H_
+#define _SELINUX_AVC_SS_H_
+
+#include "flask.h"
+
+int avc_ss_reset(u32 seqno);
+
+/* Class/perm mapping support */
+struct security_class_mapping {
+	const char *name;
+	const char *perms[sizeof(u32) * 8 + 1];
+};
+
+extern struct security_class_mapping secclass_map[];
+
+/*
+ * The security server must be initialized before
+ * any labeling or access decisions can be provided.
+ */
+extern int ss_initialized;
+
+#endif /* _SELINUX_AVC_SS_H_ */
+
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
new file mode 100644
index 0000000..5a4eef5
--- /dev/null
+++ b/security/selinux/include/classmap.h
@@ -0,0 +1,161 @@
+#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
+    "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append"
+
+#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
+    "rename", "execute", "quotaon", "mounton", "audit_access", \
+    "open", "execmod"
+
+#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
+    "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom",  \
+    "sendto", "name_bind"
+
+#define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \
+	    "write", "associate", "unix_read", "unix_write"
+
+/*
+ * Note: The name for any socket class should be suffixed by "socket",
+ *	 and should not contain more than one occurrence of "socket".
+ */
+struct security_class_mapping secclass_map[] = {
+	{ "security",
+	  { "compute_av", "compute_create", "compute_member",
+	    "check_context", "load_policy", "compute_relabel",
+	    "compute_user", "setenforce", "setbool", "setsecparam",
+	    "setcheckreqprot", "read_policy", NULL } },
+	{ "process",
+	  { "fork", "transition", "sigchld", "sigkill",
+	    "sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
+	    "getsession", "getpgid", "setpgid", "getcap", "setcap", "share",
+	    "getattr", "setexec", "setfscreate", "noatsecure", "siginh",
+	    "setrlimit", "rlimitinh", "dyntransition", "setcurrent",
+	    "execmem", "execstack", "execheap", "setkeycreate",
+	    "setsockcreate", NULL } },
+	{ "system",
+	  { "ipc_info", "syslog_read", "syslog_mod",
+	    "syslog_console", "module_request", NULL } },
+	{ "capability",
+	  { "chown", "dac_override", "dac_read_search",
+	    "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap",
+	    "linux_immutable", "net_bind_service", "net_broadcast",
+	    "net_admin", "net_raw", "ipc_lock", "ipc_owner", "sys_module",
+	    "sys_rawio", "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin",
+	    "sys_boot", "sys_nice", "sys_resource", "sys_time",
+	    "sys_tty_config", "mknod", "lease", "audit_write",
+	    "audit_control", "setfcap", NULL } },
+	{ "filesystem",
+	  { "mount", "remount", "unmount", "getattr",
+	    "relabelfrom", "relabelto", "associate", "quotamod",
+	    "quotaget", NULL } },
+	{ "file",
+	  { COMMON_FILE_PERMS,
+	    "execute_no_trans", "entrypoint", NULL } },
+	{ "dir",
+	  { COMMON_FILE_PERMS, "add_name", "remove_name",
+	    "reparent", "search", "rmdir", NULL } },
+	{ "fd", { "use", NULL } },
+	{ "lnk_file",
+	  { COMMON_FILE_PERMS, NULL } },
+	{ "chr_file",
+	  { COMMON_FILE_PERMS, NULL } },
+	{ "blk_file",
+	  { COMMON_FILE_PERMS, NULL } },
+	{ "sock_file",
+	  { COMMON_FILE_PERMS, NULL } },
+	{ "fifo_file",
+	  { COMMON_FILE_PERMS, NULL } },
+	{ "socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "tcp_socket",
+	  { COMMON_SOCK_PERMS,
+	    "node_bind", "name_connect",
+	    NULL } },
+	{ "udp_socket",
+	  { COMMON_SOCK_PERMS,
+	    "node_bind", NULL } },
+	{ "rawip_socket",
+	  { COMMON_SOCK_PERMS,
+	    "node_bind", NULL } },
+	{ "node",
+	  { "recvfrom", "sendto", NULL } },
+	{ "netif",
+	  { "ingress", "egress", NULL } },
+	{ "netlink_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "packet_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "key_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "unix_stream_socket",
+	  { COMMON_SOCK_PERMS, "connectto", NULL } },
+	{ "unix_dgram_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "sem",
+	  { COMMON_IPC_PERMS, NULL } },
+	{ "msg", { "send", "receive", NULL } },
+	{ "msgq",
+	  { COMMON_IPC_PERMS, "enqueue", NULL } },
+	{ "shm",
+	  { COMMON_IPC_PERMS, "lock", NULL } },
+	{ "ipc",
+	  { COMMON_IPC_PERMS, NULL } },
+	{ "netlink_route_socket",
+	  { COMMON_SOCK_PERMS,
+	    "nlmsg_read", "nlmsg_write", NULL } },
+	{ "netlink_tcpdiag_socket",
+	  { COMMON_SOCK_PERMS,
+	    "nlmsg_read", "nlmsg_write", NULL } },
+	{ "netlink_nflog_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_xfrm_socket",
+	  { COMMON_SOCK_PERMS,
+	    "nlmsg_read", "nlmsg_write", NULL } },
+	{ "netlink_selinux_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_iscsi_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_audit_socket",
+	  { COMMON_SOCK_PERMS,
+	    "nlmsg_read", "nlmsg_write", "nlmsg_relay", "nlmsg_readpriv",
+	    "nlmsg_tty_audit", NULL } },
+	{ "netlink_fib_lookup_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_connector_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_netfilter_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_dnrt_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "association",
+	  { "sendto", "recvfrom", "setcontext", "polmatch", NULL } },
+	{ "netlink_kobject_uevent_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_generic_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_scsitransport_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_rdma_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "netlink_crypto_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "appletalk_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
+	{ "packet",
+	  { "send", "recv", "relabelto", "forward_in", "forward_out", NULL } },
+	{ "key",
+	  { "view", "read", "write", "search", "link", "setattr", "create",
+	    NULL } },
+	{ "dccp_socket",
+	  { COMMON_SOCK_PERMS,
+	    "node_bind", "name_connect", NULL } },
+	{ "memprotect", { "mmap_zero", NULL } },
+	{ "peer", { "recv", NULL } },
+	{ "capability2",
+	  { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
+	    "audit_read", NULL } },
+	{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
+	{ "tun_socket",
+	  { COMMON_SOCK_PERMS, "attach_queue", NULL } },
+	{ "binder", { "impersonate", "call", "set_context_mgr", "transfer",
+		      NULL } },
+	{ NULL }
+  };
diff --git a/security/selinux/include/conditional.h b/security/selinux/include/conditional.h
new file mode 100644
index 0000000..67ce7a8
--- /dev/null
+++ b/security/selinux/include/conditional.h
@@ -0,0 +1,22 @@
+/*
+ * Interface to booleans in the security server. This is exported
+ * for the selinuxfs.
+ *
+ * Author: Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ *	This program is free software; you can redistribute it and/or modify
+ *  	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ */
+
+#ifndef _SELINUX_CONDITIONAL_H_
+#define _SELINUX_CONDITIONAL_H_
+
+int security_get_bools(int *len, char ***names, int **values);
+
+int security_set_bools(int len, int *values);
+
+int security_get_bool_value(int bool);
+
+#endif
diff --git a/security/selinux/include/initial_sid_to_string.h b/security/selinux/include/initial_sid_to_string.h
new file mode 100644
index 0000000..a59b64e
--- /dev/null
+++ b/security/selinux/include/initial_sid_to_string.h
@@ -0,0 +1,33 @@
+/* This file is automatically generated.  Do not edit. */
+static const char *initial_sid_to_string[] =
+{
+    "null",
+    "kernel",
+    "security",
+    "unlabeled",
+    "fs",
+    "file",
+    "file_labels",
+    "init",
+    "any_socket",
+    "port",
+    "netif",
+    "netmsg",
+    "node",
+    "igmp_packet",
+    "icmp_socket",
+    "tcp_socket",
+    "sysctl_modprobe",
+    "sysctl",
+    "sysctl_fs",
+    "sysctl_kernel",
+    "sysctl_net",
+    "sysctl_net_unix",
+    "sysctl_vm",
+    "sysctl_dev",
+    "kmod",
+    "policy",
+    "scmp_packet",
+    "devnull",
+};
+
diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
new file mode 100644
index 0000000..c721454
--- /dev/null
+++ b/security/selinux/include/netif.h
@@ -0,0 +1,27 @@
+/*
+ * Network interface table.
+ *
+ * Network interfaces (devices) do not have a security field, so we
+ * maintain a table associating each interface with a SID.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ *                    Paul Moore <paul@paul-moore.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#ifndef _SELINUX_NETIF_H_
+#define _SELINUX_NETIF_H_
+
+#include <net/net_namespace.h>
+
+void sel_netif_flush(void);
+
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid);
+
+#endif	/* _SELINUX_NETIF_H_ */
+
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
new file mode 100644
index 0000000..8c59b8f
--- /dev/null
+++ b/security/selinux/include/netlabel.h
@@ -0,0 +1,149 @@
+/*
+ * SELinux interface to the NetLabel subsystem
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ *
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ *
+ * This program is free software;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;  if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _SELINUX_NETLABEL_H_
+#define _SELINUX_NETLABEL_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/request_sock.h>
+
+#include "avc.h"
+#include "objsec.h"
+
+#ifdef CONFIG_NETLABEL
+void selinux_netlbl_cache_invalidate(void);
+
+void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway);
+
+void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec);
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec);
+
+int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+				 u16 family,
+				 u32 *type,
+				 u32 *sid);
+int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
+				 u16 family,
+				 u32 sid);
+
+int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family);
+void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family);
+int selinux_netlbl_socket_post_create(struct sock *sk, u16 family);
+int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+				struct sk_buff *skb,
+				u16 family,
+				struct common_audit_data *ad);
+int selinux_netlbl_socket_setsockopt(struct socket *sock,
+				     int level,
+				     int optname);
+int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr);
+
+#else
+static inline void selinux_netlbl_cache_invalidate(void)
+{
+	return;
+}
+
+static inline void selinux_netlbl_err(struct sk_buff *skb,
+				      int error,
+				      int gateway)
+{
+	return;
+}
+
+static inline void selinux_netlbl_sk_security_free(
+					       struct sk_security_struct *sksec)
+{
+	return;
+}
+
+static inline void selinux_netlbl_sk_security_reset(
+					       struct sk_security_struct *sksec)
+{
+	return;
+}
+
+static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+					       u16 family,
+					       u32 *type,
+					       u32 *sid)
+{
+	*type = NETLBL_NLTYPE_NONE;
+	*sid = SECSID_NULL;
+	return 0;
+}
+static inline int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
+					       u16 family,
+					       u32 sid)
+{
+	return 0;
+}
+
+static inline int selinux_netlbl_conn_setsid(struct sock *sk,
+					     struct sockaddr *addr)
+{
+	return 0;
+}
+
+static inline int selinux_netlbl_inet_conn_request(struct request_sock *req,
+						   u16 family)
+{
+	return 0;
+}
+static inline void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
+{
+	return;
+}
+static inline int selinux_netlbl_socket_post_create(struct sock *sk,
+						    u16 family)
+{
+	return 0;
+}
+static inline int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+					      struct sk_buff *skb,
+					      u16 family,
+					      struct common_audit_data *ad)
+{
+	return 0;
+}
+static inline int selinux_netlbl_socket_setsockopt(struct socket *sock,
+						   int level,
+						   int optname)
+{
+	return 0;
+}
+static inline int selinux_netlbl_socket_connect(struct sock *sk,
+						struct sockaddr *addr)
+{
+	return 0;
+}
+#endif /* CONFIG_NETLABEL */
+
+#endif
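
Everything in the header above is declared twice: once for CONFIG_NETLABEL builds and once as static inline stubs that return sane defaults, so callers never need #ifdefs of their own. A compile-time sketch of the same pattern in miniature, with an invented FEATURE_LABELS flag and function name:

#include <stdio.h>

#define FEATURE_LABELS 1	/* flip to 0 to build the stubbed variant */

#if FEATURE_LABELS
static int labels_get_sid(int pkt, unsigned int *sid)
{
	*sid = 0x1000 + pkt;	/* pretend we derived a SID */
	return 0;
}
#else
static inline int labels_get_sid(int pkt, unsigned int *sid)
{
	*sid = 0;		/* SECSID_NULL-style default */
	return 0;		/* callers need no #ifdefs */
}
#endif

int main(void)
{
	unsigned int sid;

	labels_get_sid(7, &sid);
	printf("sid = %#x\n", sid);
	return 0;
}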
diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h
new file mode 100644
index 0000000..937668d
--- /dev/null
+++ b/security/selinux/include/netnode.h
@@ -0,0 +1,34 @@
+/*
+ * Network node table
+ *
+ * SELinux must keep a mapping of network nodes to labels/SIDs.  This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead since most of these queries happen on
+ * a per-packet basis.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ *
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2007
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SELINUX_NETNODE_H
+#define _SELINUX_NETNODE_H
+
+void sel_netnode_flush(void);
+
+int sel_netnode_sid(void *addr, u16 family, u32 *sid);
+
+#endif
diff --git a/security/selinux/include/netport.h b/security/selinux/include/netport.h
new file mode 100644
index 0000000..d1ce896
--- /dev/null
+++ b/security/selinux/include/netport.h
@@ -0,0 +1,33 @@
+/*
+ * Network port table
+ *
+ * SELinux must keep a mapping of network ports to labels/SIDs.  This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ *
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SELINUX_NETPORT_H
+#define _SELINUX_NETPORT_H
+
+void sel_netport_flush(void);
+
+int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid);
+
+#endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
new file mode 100644
index 0000000..81fa718
--- /dev/null
+++ b/security/selinux/include/objsec.h
@@ -0,0 +1,128 @@
+/*
+ *  NSA Security-Enhanced Linux (SELinux) security module
+ *
+ *  This file contains the SELinux security data structures for kernel objects.
+ *
+ *  Author(s):  Stephen Smalley, <sds@epoch.ncsc.mil>
+ *		Chris Vance, <cvance@nai.com>
+ *		Wayne Salamon, <wsalamon@nai.com>
+ *		James Morris <jmorris@redhat.com>
+ *
+ *  Copyright (C) 2001,2002 Networks Associates Technology, Inc.
+ *  Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License version 2,
+ *	as published by the Free Software Foundation.
+ */
+#ifndef _SELINUX_OBJSEC_H_
+#define _SELINUX_OBJSEC_H_
+
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/binfmts.h>
+#include <linux/in.h>
+#include <linux/spinlock.h>
+#include <net/net_namespace.h>
+#include "flask.h"
+#include "avc.h"
+
+struct task_security_struct {
+	u32 osid;		/* SID prior to last execve */
+	u32 sid;		/* current SID */
+	u32 exec_sid;		/* exec SID */
+	u32 create_sid;		/* fscreate SID */
+	u32 keycreate_sid;	/* keycreate SID */
+	u32 sockcreate_sid;	/* sockcreate SID */
+};
+
+struct inode_security_struct {
+	struct inode *inode;	/* back pointer to inode object */
+	union {
+		struct list_head list;	/* list of inode_security_struct */
+		struct rcu_head rcu;	/* for freeing the inode_security_struct */
+	};
+	u32 task_sid;		/* SID of creating task */
+	u32 sid;		/* SID of this object */
+	u16 sclass;		/* security class of this object */
+	unsigned char initialized;	/* initialization flag */
+	struct mutex lock;
+};
+
+struct file_security_struct {
+	u32 sid;		/* SID of open file description */
+	u32 fown_sid;		/* SID of file owner (for SIGIO) */
+	u32 isid;		/* SID of inode at the time of file open */
+	u32 pseqno;		/* Policy seqno at the time of file open */
+};
+
+struct superblock_security_struct {
+	struct super_block *sb;		/* back pointer to sb object */
+	u32 sid;			/* SID of file system superblock */
+	u32 def_sid;			/* default SID for labeling */
+	u32 mntpoint_sid;		/* SECURITY_FS_USE_MNTPOINT context for files */
+	unsigned short behavior;	/* labeling behavior */
+	unsigned short flags;		/* which mount options were specified */
+	struct mutex lock;
+	struct list_head isec_head;
+	spinlock_t isec_lock;
+};
+
+struct msg_security_struct {
+	u32 sid;	/* SID of message */
+};
+
+struct ipc_security_struct {
+	u16 sclass;	/* security class of this object */
+	u32 sid;	/* SID of IPC resource */
+};
+
+struct netif_security_struct {
+	struct net *ns;			/* network namespace */
+	int ifindex;			/* device index */
+	u32 sid;			/* SID for this interface */
+};
+
+struct netnode_security_struct {
+	union {
+		__be32 ipv4;		/* IPv4 node address */
+		struct in6_addr ipv6;	/* IPv6 node address */
+	} addr;
+	u32 sid;			/* SID for this node */
+	u16 family;			/* address family */
+};
+
+struct netport_security_struct {
+	u32 sid;			/* SID for this node */
+	u16 port;			/* port number */
+	u8 protocol;			/* transport protocol */
+};
+
+struct sk_security_struct {
+#ifdef CONFIG_NETLABEL
+	enum {				/* NetLabel state */
+		NLBL_UNSET = 0,
+		NLBL_REQUIRE,
+		NLBL_LABELED,
+		NLBL_REQSKB,
+		NLBL_CONNLABELED,
+	} nlbl_state;
+	struct netlbl_lsm_secattr *nlbl_secattr; /* NetLabel sec attributes */
+#endif
+	u32 sid;			/* SID of this object */
+	u32 peer_sid;			/* SID of peer */
+	u16 sclass;			/* sock security class */
+};
+
+struct tun_security_struct {
+	u32 sid;			/* SID for the tun device sockets */
+};
+
+struct key_security_struct {
+	u32 sid;	/* SID of key */
+};
+
+extern unsigned int selinux_checkreqprot;
+
+#endif /* _SELINUX_OBJSEC_H_ */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
new file mode 100644
index 0000000..223e9fd
--- /dev/null
+++ b/security/selinux/include/security.h
@@ -0,0 +1,269 @@
+/*
+ * Security server interface.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ *
+ */
+
+#ifndef _SELINUX_SECURITY_H_
+#define _SELINUX_SECURITY_H_
+
+#include <linux/compiler.h>
+#include <linux/dcache.h>
+#include <linux/magic.h>
+#include <linux/types.h>
+#include "flask.h"
+
+#define SECSID_NULL			0x00000000 /* unspecified SID */
+#define SECSID_WILD			0xffffffff /* wildcard SID */
+#define SECCLASS_NULL			0x0000 /* no class */
+
+/* Identify specific policy version changes */
+#define POLICYDB_VERSION_BASE		15
+#define POLICYDB_VERSION_BOOL		16
+#define POLICYDB_VERSION_IPV6		17
+#define POLICYDB_VERSION_NLCLASS	18
+#define POLICYDB_VERSION_VALIDATETRANS	19
+#define POLICYDB_VERSION_MLS		19
+#define POLICYDB_VERSION_AVTAB		20
+#define POLICYDB_VERSION_RANGETRANS	21
+#define POLICYDB_VERSION_POLCAP		22
+#define POLICYDB_VERSION_PERMISSIVE	23
+#define POLICYDB_VERSION_BOUNDARY	24
+#define POLICYDB_VERSION_FILENAME_TRANS	25
+#define POLICYDB_VERSION_ROLETRANS	26
+#define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS	27
+#define POLICYDB_VERSION_DEFAULT_TYPE	28
+#define POLICYDB_VERSION_CONSTRAINT_NAMES	29
+#define POLICYDB_VERSION_XPERMS_IOCTL	30
+
+/* Range of policy versions we understand */
+#define POLICYDB_VERSION_MIN   POLICYDB_VERSION_BASE
+#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
+#define POLICYDB_VERSION_MAX	CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
+#else
+#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_XPERMS_IOCTL
+#endif
+
+/* Mask for just the mount related flags */
+#define SE_MNTMASK	0x0f
+/* Super block security struct flags for mount options */
+/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
+#define CONTEXT_MNT	0x01
+#define FSCONTEXT_MNT	0x02
+#define ROOTCONTEXT_MNT	0x04
+#define DEFCONTEXT_MNT	0x08
+#define SBLABEL_MNT	0x10
+/* Non-mount related flags */
+#define SE_SBINITIALIZED	0x0100
+#define SE_SBPROC		0x0200
+#define SE_SBGENFS		0x0400
+
+#define CONTEXT_STR	"context="
+#define FSCONTEXT_STR	"fscontext="
+#define ROOTCONTEXT_STR	"rootcontext="
+#define DEFCONTEXT_STR	"defcontext="
+#define LABELSUPP_STR "seclabel"
+
+struct netlbl_lsm_secattr;
+
+extern int selinux_enabled;
+
+/* Policy capabilities */
+enum {
+	POLICYDB_CAPABILITY_NETPEER,
+	POLICYDB_CAPABILITY_OPENPERM,
+	POLICYDB_CAPABILITY_REDHAT1,
+	POLICYDB_CAPABILITY_ALWAYSNETWORK,
+	__POLICYDB_CAPABILITY_MAX
+};
+#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
+
+extern int selinux_policycap_netpeer;
+extern int selinux_policycap_openperm;
+extern int selinux_policycap_alwaysnetwork;
+
+/*
+ * type_datum properties
+ * available at the kernel policy version >= POLICYDB_VERSION_BOUNDARY
+ */
+#define TYPEDATUM_PROPERTY_PRIMARY	0x0001
+#define TYPEDATUM_PROPERTY_ATTRIBUTE	0x0002
+
+/* limitation of boundary depth */
+#define POLICYDB_BOUNDS_MAXDEPTH	4
+
+int security_mls_enabled(void);
+
+int security_load_policy(void *data, size_t len);
+int security_read_policy(void **data, size_t *len);
+size_t security_policydb_len(void);
+
+int security_policycap_supported(unsigned int req_cap);
+
+#define SEL_VEC_MAX 32
+struct av_decision {
+	u32 allowed;
+	u32 auditallow;
+	u32 auditdeny;
+	u32 seqno;
+	u32 flags;
+};
+
+#define XPERMS_ALLOWED 1
+#define XPERMS_AUDITALLOW 2
+#define XPERMS_DONTAUDIT 4
+
+#define security_xperm_set(perms, x) (perms[x >> 5] |= 1 << (x & 0x1f))
+#define security_xperm_test(perms, x) (1 & (perms[x >> 5] >> (x & 0x1f)))
+struct extended_perms_data {
+	u32 p[8];
+};
+
+struct extended_perms_decision {
+	u8 used;
+	u8 driver;
+	struct extended_perms_data *allowed;
+	struct extended_perms_data *auditallow;
+	struct extended_perms_data *dontaudit;
+};
+
+struct extended_perms {
+	u16 len;	/* length of the associated decision chain */
+	struct extended_perms_data drivers; /* flag drivers that are used */
+};
+
+/* definitions of av_decision.flags */
+#define AVD_FLAGS_PERMISSIVE	0x0001
+
+void security_compute_av(u32 ssid, u32 tsid,
+			 u16 tclass, struct av_decision *avd,
+			 struct extended_perms *xperms);
+
+void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass,
+			 u8 driver, struct extended_perms_decision *xpermd);
+
+void security_compute_av_user(u32 ssid, u32 tsid,
+			     u16 tclass, struct av_decision *avd);
+
+int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
+			    const struct qstr *qstr, u32 *out_sid);
+
+int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
+				 const char *objname, u32 *out_sid);
+
+int security_member_sid(u32 ssid, u32 tsid,
+	u16 tclass, u32 *out_sid);
+
+int security_change_sid(u32 ssid, u32 tsid,
+	u16 tclass, u32 *out_sid);
+
+int security_sid_to_context(u32 sid, char **scontext,
+	u32 *scontext_len);
+
+int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
+
+int security_context_to_sid(const char *scontext, u32 scontext_len,
+			    u32 *out_sid, gfp_t gfp);
+
+int security_context_str_to_sid(const char *scontext, u32 *out_sid, gfp_t gfp);
+
+int security_context_to_sid_default(const char *scontext, u32 scontext_len,
+				    u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
+
+int security_context_to_sid_force(const char *scontext, u32 scontext_len,
+				  u32 *sid);
+
+int security_get_user_sids(u32 callsid, char *username,
+			   u32 **sids, u32 *nel);
+
+int security_port_sid(u8 protocol, u16 port, u32 *out_sid);
+
+int security_netif_sid(char *name, u32 *if_sid);
+
+int security_node_sid(u16 domain, void *addr, u32 addrlen,
+	u32 *out_sid);
+
+int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
+				 u16 tclass);
+
+int security_bounded_transition(u32 oldsid, u32 newsid);
+
+int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);
+
+int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
+				 u32 xfrm_sid,
+				 u32 *peer_sid);
+
+int security_get_classes(char ***classes, int *nclasses);
+int security_get_permissions(char *class, char ***perms, int *nperms);
+int security_get_reject_unknown(void);
+int security_get_allow_unknown(void);
+
+#define SECURITY_FS_USE_XATTR		1 /* use xattr */
+#define SECURITY_FS_USE_TRANS		2 /* use transition SIDs, e.g. devpts/tmpfs */
+#define SECURITY_FS_USE_TASK		3 /* use task SIDs, e.g. pipefs/sockfs */
+#define SECURITY_FS_USE_GENFS		4 /* use the genfs support */
+#define SECURITY_FS_USE_NONE		5 /* no labeling support */
+#define SECURITY_FS_USE_MNTPOINT	6 /* use mountpoint labeling */
+#define SECURITY_FS_USE_NATIVE		7 /* use native label support */
+#define SECURITY_FS_USE_MAX		7 /* Highest SECURITY_FS_USE_XXX */
+
+int security_fs_use(struct super_block *sb);
+
+int security_genfs_sid(const char *fstype, char *name, u16 sclass,
+	u32 *sid);
+
+#ifdef CONFIG_NETLABEL
+int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+				   u32 *sid);
+
+int security_netlbl_sid_to_secattr(u32 sid,
+				   struct netlbl_lsm_secattr *secattr);
+#else
+static inline int security_netlbl_secattr_to_sid(
+					    struct netlbl_lsm_secattr *secattr,
+					    u32 *sid)
+{
+	return -EIDRM;
+}
+
+static inline int security_netlbl_sid_to_secattr(u32 sid,
+					   struct netlbl_lsm_secattr *secattr)
+{
+	return -ENOENT;
+}
+#endif /* CONFIG_NETLABEL */
+
+const char *security_get_initial_sid_context(u32 sid);
+
+/*
+ * status notifier using mmap interface
+ */
+extern struct page *selinux_kernel_status_page(void);
+
+#define SELINUX_KERNEL_STATUS_VERSION	1
+struct selinux_kernel_status {
+	u32	version;	/* version number of this structure */
+	u32	sequence;	/* sequence number of seqlock logic */
+	u32	enforcing;	/* current setting of enforcing mode */
+	u32	policyload;	/* times of policy reloaded */
+	u32	deny_unknown;	/* current setting of deny_unknown */
+	/*
+	 * The version > 0 supports above members.
+	 */
+} __packed;
+
+extern void selinux_status_update_setenforce(int enforcing);
+extern void selinux_status_update_policyload(int seqno);
+extern void selinux_complete_init(void);
+extern int selinux_disable(void);
+extern void exit_sel_fs(void);
+extern struct path selinux_null;
+extern struct vfsmount *selinuxfs_mount;
+extern void selnl_notify_setenforce(int val);
+extern void selnl_notify_policyload(u32 seqno);
+extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
+
+#endif /* _SELINUX_SECURITY_H_ */
+
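
The security_xperm_set()/security_xperm_test() macros in the header above treat extended_perms_data as a 256-bit bitmap: x >> 5 selects one of the eight 32-bit words and x & 0x1f selects the bit within it. A standalone check of that arithmetic, copying the macros verbatim into a tiny test harness:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* copied from the header above */
#define security_xperm_set(perms, x)  (perms[x >> 5] |= 1 << (x & 0x1f))
#define security_xperm_test(perms, x) (1 & (perms[x >> 5] >> (x & 0x1f)))

struct extended_perms_data {
	u32 p[8];		/* 8 x 32 bits = one bit per value 0..255 */
};

int main(void)
{
	struct extended_perms_data xp = { { 0 } };

	security_xperm_set(xp.p, 0x42);		/* allow command 0x42 */

	/* 0x42 >> 5 picks word 2, 0x42 & 0x1f picks bit 2 within it */
	printf("0x42 allowed? %d\n", security_xperm_test(xp.p, 0x42));
	printf("0x43 allowed? %d\n", security_xperm_test(xp.p, 0x43));
	return 0;
}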
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
new file mode 100644
index 0000000..1450f85
--- /dev/null
+++ b/security/selinux/include/xfrm.h
@@ -0,0 +1,93 @@
+/*
+ * SELinux support for the XFRM LSM hooks
+ *
+ * Author : Trent Jaeger, <jaegert@us.ibm.com>
+ * Updated : Venkat Yekkirala, <vyekkirala@TrustedCS.com>
+ */
+#ifndef _SELINUX_XFRM_H_
+#define _SELINUX_XFRM_H_
+
+#include <net/flow.h>
+
+int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+			      struct xfrm_user_sec_ctx *uctx,
+			      gfp_t gfp);
+int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
+			      struct xfrm_sec_ctx **new_ctxp);
+void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
+int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+			     struct xfrm_user_sec_ctx *uctx);
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+				     struct xfrm_sec_ctx *polsec, u32 secid);
+void selinux_xfrm_state_free(struct xfrm_state *x);
+int selinux_xfrm_state_delete(struct xfrm_state *x);
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+				      struct xfrm_policy *xp,
+				      const struct flowi *fl);
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+extern atomic_t selinux_xfrm_refcount;
+
+static inline int selinux_xfrm_enabled(void)
+{
+	return (atomic_read(&selinux_xfrm_refcount) > 0);
+}
+
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+			      struct common_audit_data *ad);
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+				struct common_audit_data *ad, u8 proto);
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
+
+static inline void selinux_xfrm_notify_policyload(void)
+{
+	struct net *net;
+
+	rtnl_lock();
+	for_each_net(net) {
+		atomic_inc(&net->xfrm.flow_cache_genid);
+		rt_genid_bump_all(net);
+	}
+	rtnl_unlock();
+}
+#else
+static inline int selinux_xfrm_enabled(void)
+{
+	return 0;
+}
+
+static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+					    struct common_audit_data *ad)
+{
+	return 0;
+}
+
+static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+					      struct common_audit_data *ad,
+					      u8 proto)
+{
+	return 0;
+}
+
+static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
+					      int ckall)
+{
+	*sid = SECSID_NULL;
+	return 0;
+}
+
+static inline void selinux_xfrm_notify_policyload(void)
+{
+}
+
+static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+	*sid = SECSID_NULL;
+	return 0;
+}
+#endif
+
+#endif /* _SELINUX_XFRM_H_ */
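
selinux_xfrm_enabled() above is a cheap gate: the hooks only pay the labeled-IPsec cost while at least one labeled policy or state holds a reference on selinux_xfrm_refcount. A userspace analogue of that gate using C11 atomics, where the increment and decrement sites merely stand in for policy add/delete:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int xfrm_refcount = 0;

/* analogue of selinux_xfrm_enabled(): nonzero while anything is labeled */
static int xfrm_enabled(void)
{
	return atomic_load(&xfrm_refcount) > 0;
}

int main(void)
{
	printf("enabled before: %d\n", xfrm_enabled());	/* 0 */
	atomic_fetch_add(&xfrm_refcount, 1);	/* labeled policy added */
	printf("enabled after:  %d\n", xfrm_enabled());	/* 1 */
	atomic_fetch_sub(&xfrm_refcount, 1);	/* labeled policy deleted */
	return 0;
}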
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
new file mode 100644
index 0000000..e607b44
--- /dev/null
+++ b/security/selinux/netif.c
@@ -0,0 +1,292 @@
+/*
+ * Network interface table.
+ *
+ * Network interfaces (devices) do not have a security field, so we
+ * maintain a table associating each interface with a SID.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ *		      Paul Moore <paul@paul-moore.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include <net/net_namespace.h>
+
+#include "security.h"
+#include "objsec.h"
+#include "netif.h"
+
+#define SEL_NETIF_HASH_SIZE	64
+#define SEL_NETIF_HASH_MAX	1024
+
+struct sel_netif {
+	struct list_head list;
+	struct netif_security_struct nsec;
+	struct rcu_head rcu_head;
+};
+
+static u32 sel_netif_total;
+static LIST_HEAD(sel_netif_list);
+static DEFINE_SPINLOCK(sel_netif_lock);
+static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
+
+/**
+ * sel_netif_hashfn - Hashing function for the interface table
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ *
+ * Description:
+ * This is the hashing function for the network interface table; it returns the
+ * bucket number for the given interface.
+ *
+ */
+static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
+{
+	return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
+}
+
+/**
+ * sel_netif_find - Search for an interface record
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ *
+ * Description:
+ * Search the network interface table and return the record matching @ifindex.
+ * If an entry can not be found in the table return NULL.
+ *
+ */
+static inline struct sel_netif *sel_netif_find(const struct net *ns,
+					       int ifindex)
+{
+	int idx = sel_netif_hashfn(ns, ifindex);
+	struct sel_netif *netif;
+
+	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
+		if (net_eq(netif->nsec.ns, ns) &&
+		    netif->nsec.ifindex == ifindex)
+			return netif;
+
+	return NULL;
+}
+
+/**
+ * sel_netif_insert - Insert a new interface into the table
+ * @netif: the new interface record
+ *
+ * Description:
+ * Add a new interface record to the network interface hash table.  Returns
+ * zero on success, negative values on failure.
+ *
+ */
+static int sel_netif_insert(struct sel_netif *netif)
+{
+	int idx;
+
+	if (sel_netif_total >= SEL_NETIF_HASH_MAX)
+		return -ENOSPC;
+
+	idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
+	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
+	sel_netif_total++;
+
+	return 0;
+}
+
+/**
+ * sel_netif_destroy - Remove an interface record from the table
+ * @netif: the existing interface record
+ *
+ * Description:
+ * Remove an existing interface record from the network interface table.
+ *
+ */
+static void sel_netif_destroy(struct sel_netif *netif)
+{
+	list_del_rcu(&netif->list);
+	sel_netif_total--;
+	kfree_rcu(netif, rcu_head);
+}
+
+/**
+ * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ * @sid: interface SID
+ *
+ * Description:
+ * This function determines the SID of a network interface by querying the
+ * security policy.  The result is added to the network interface table to
+ * speed up future queries.  Returns zero on success, negative values on
+ * failure.
+ *
+ */
+static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
+{
+	int ret;
+	struct sel_netif *netif;
+	struct sel_netif *new = NULL;
+	struct net_device *dev;
+
+	dev = dev_get_by_index(ns, ifindex);
+	if (unlikely(dev == NULL)) {
+		printk(KERN_WARNING
+		       "SELinux: failure in sel_netif_sid_slow(),"
+		       " invalid network interface (%d)\n", ifindex);
+		return -ENOENT;
+	}
+
+	spin_lock_bh(&sel_netif_lock);
+	netif = sel_netif_find(ns, ifindex);
+	if (netif != NULL) {
+		*sid = netif->nsec.sid;
+		ret = 0;
+		goto out;
+	}
+	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+	if (new == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = security_netif_sid(dev->name, &new->nsec.sid);
+	if (ret != 0)
+		goto out;
+	new->nsec.ns = ns;
+	new->nsec.ifindex = ifindex;
+	ret = sel_netif_insert(new);
+	if (ret != 0)
+		goto out;
+	*sid = new->nsec.sid;
+
+out:
+	spin_unlock_bh(&sel_netif_lock);
+	dev_put(dev);
+	if (unlikely(ret)) {
+		printk(KERN_WARNING
+		       "SELinux: failure in sel_netif_sid_slow(),"
+		       " unable to determine network interface label (%d)\n",
+		       ifindex);
+		kfree(new);
+	}
+	return ret;
+}
+
+/**
+ * sel_netif_sid - Lookup the SID of a network interface
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ * @sid: interface SID
+ *
+ * Description:
+ * This function determines the SID of a network interface using the fastest
+ * method possible.  First the interface table is queried, but if an entry
+ * can't be found then the policy is queried and the result is added to the
+ * table to speed up future queries.  Returns zero on success, negative values
+ * on failure.
+ *
+ */
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
+{
+	struct sel_netif *netif;
+
+	rcu_read_lock();
+	netif = sel_netif_find(ns, ifindex);
+	if (likely(netif != NULL)) {
+		*sid = netif->nsec.sid;
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+
+	return sel_netif_sid_slow(ns, ifindex, sid);
+}
+
+/**
+ * sel_netif_kill - Remove an entry from the network interface table
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ *
+ * Description:
+ * This function removes the entry matching @ifindex from the network interface
+ * table if it exists.
+ *
+ */
+static void sel_netif_kill(const struct net *ns, int ifindex)
+{
+	struct sel_netif *netif;
+
+	rcu_read_lock();
+	spin_lock_bh(&sel_netif_lock);
+	netif = sel_netif_find(ns, ifindex);
+	if (netif)
+		sel_netif_destroy(netif);
+	spin_unlock_bh(&sel_netif_lock);
+	rcu_read_unlock();
+}
+
+/**
+ * sel_netif_flush - Flush the entire network interface table
+ *
+ * Description:
+ * Remove all entries from the network interface table.
+ *
+ */
+void sel_netif_flush(void)
+{
+	int idx;
+	struct sel_netif *netif;
+
+	spin_lock_bh(&sel_netif_lock);
+	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
+		list_for_each_entry(netif, &sel_netif_hash[idx], list)
+			sel_netif_destroy(netif);
+	spin_unlock_bh(&sel_netif_lock);
+}
+
+static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
+					     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (event == NETDEV_DOWN)
+		sel_netif_kill(dev_net(dev), dev->ifindex);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sel_netif_netdev_notifier = {
+	.notifier_call = sel_netif_netdev_notifier_handler,
+};
+
+static __init int sel_netif_init(void)
+{
+	int i;
+
+	if (!selinux_enabled)
+		return 0;
+
+	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&sel_netif_hash[i]);
+
+	register_netdevice_notifier(&sel_netif_netdev_notifier);
+
+	return 0;
+}
+
+__initcall(sel_netif_init);
+
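
sel_netif_hashfn() above folds the namespace pointer and the interface index together and masks with SEL_NETIF_HASH_SIZE - 1, which only works as a cheap modulo because the table size is a power of two. A quick standalone check of the same computation, with a stack variable standing in for a struct net pointer:

#include <stdint.h>
#include <stdio.h>

#define SEL_NETIF_HASH_SIZE 64		/* must stay a power of two */

/* same computation as sel_netif_hashfn() above */
static unsigned int netif_hashfn(const void *ns, int ifindex)
{
	/* the mask is a cheap modulo only for power-of-two sizes */
	return ((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1);
}

int main(void)
{
	int dummy_ns;			/* stands in for a struct net * */
	int ifindex;

	for (ifindex = 1; ifindex <= 4; ifindex++)
		printf("ifindex %d -> bucket %u\n",
		       ifindex, netif_hashfn(&dummy_ns, ifindex));
	return 0;
}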
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
new file mode 100644
index 0000000..1f989a5
--- /dev/null
+++ b/security/selinux/netlabel.c
@@ -0,0 +1,497 @@
+/*
+ * SELinux NetLabel Support
+ *
+ * This file provides the necessary glue to tie NetLabel into the SELinux
+ * subsystem.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ *
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2007, 2008
+ *
+ * This program is free software;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;  if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/gfp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/sock.h>
+#include <net/netlabel.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "objsec.h"
+#include "security.h"
+#include "netlabel.h"
+
+/**
+ * selinux_netlbl_sidlookup_cached - Cache a SID lookup
+ * @skb: the packet
+ * @secattr: the NetLabel security attributes
+ * @sid: the SID
+ *
+ * Description:
+ * Query the SELinux security server to look up the correct SID for the given
+ * security attributes.  If the query is successful, cache the result to speed
+ * up future lookups.  Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb,
+					   struct netlbl_lsm_secattr *secattr,
+					   u32 *sid)
+{
+	int rc;
+
+	rc = security_netlbl_secattr_to_sid(secattr, sid);
+	if (rc == 0 &&
+	    (secattr->flags & NETLBL_SECATTR_CACHEABLE) &&
+	    (secattr->flags & NETLBL_SECATTR_CACHE))
+		netlbl_cache_add(skb, secattr);
+
+	return rc;
+}
+
+/**
+ * selinux_netlbl_sock_genattr - Generate the NetLabel socket secattr
+ * @sk: the socket
+ *
+ * Description:
+ * Generate the NetLabel security attributes for a socket, making full use of
+ * the socket's attribute cache.  Returns a pointer to the security attributes
+ * on success, NULL on failure.
+ *
+ */
+static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk)
+{
+	int rc;
+	struct sk_security_struct *sksec = sk->sk_security;
+	struct netlbl_lsm_secattr *secattr;
+
+	if (sksec->nlbl_secattr != NULL)
+		return sksec->nlbl_secattr;
+
+	secattr = netlbl_secattr_alloc(GFP_ATOMIC);
+	if (secattr == NULL)
+		return NULL;
+	rc = security_netlbl_sid_to_secattr(sksec->sid, secattr);
+	if (rc != 0) {
+		netlbl_secattr_free(secattr);
+		return NULL;
+	}
+	sksec->nlbl_secattr = secattr;
+
+	return secattr;
+}
+
+/**
+ * selinux_netlbl_sock_getattr - Get the cached NetLabel secattr
+ * @sk: the socket
+ * @sid: the SID
+ *
+ * Description:
+ * Query the socket's cached secattr; if the SID matches the cached value
+ * return the cached attributes, otherwise return NULL.
+ *
+ */
+static struct netlbl_lsm_secattr *selinux_netlbl_sock_getattr(
+							const struct sock *sk,
+							u32 sid)
+{
+	struct sk_security_struct *sksec = sk->sk_security;
+	struct netlbl_lsm_secattr *secattr = sksec->nlbl_secattr;
+
+	if (secattr == NULL)
+		return NULL;
+
+	if ((secattr->flags & NETLBL_SECATTR_SECID) &&
+	    (secattr->attr.secid == sid))
+		return secattr;
+
+	return NULL;
+}
+
+/**
+ * selinux_netlbl_cache_invalidate - Invalidate the NetLabel cache
+ *
+ * Description:
+ * Invalidate the NetLabel security attribute mapping cache.
+ *
+ */
+void selinux_netlbl_cache_invalidate(void)
+{
+	netlbl_cache_invalidate();
+}
+
+/**
+ * selinux_netlbl_err - Handle a NetLabel packet error
+ * @skb: the packet
+ * @error: the error code
+ * @gateway: true if host is acting as a gateway, false otherwise
+ *
+ * Description:
+ * When a packet is dropped due to a call to avc_has_perm() pass the error
+ * code to the NetLabel subsystem so any protocol specific processing can be
+ * done.  This is safe to call even if you are unsure if NetLabel labeling is
+ * present on the packet, NetLabel is smart enough to only act when it should.
+ *
+ */
+void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway)
+{
+	netlbl_skbuff_err(skb, error, gateway);
+}
+
+/**
+ * selinux_netlbl_sk_security_free - Free the NetLabel fields
+ * @sksec: the sk_security_struct
+ *
+ * Description:
+ * Free all of the memory in the NetLabel fields of a sk_security_struct.
+ *
+ */
+void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec)
+{
+	if (sksec->nlbl_secattr != NULL)
+		netlbl_secattr_free(sksec->nlbl_secattr);
+}
+
+/**
+ * selinux_netlbl_sk_security_reset - Reset the NetLabel fields
+ * @sksec: the sk_security_struct
+ *
+ * Description:
+ * Called when the NetLabel state of a sk_security_struct needs to be reset.
+ * The caller is responsible for all the NetLabel sk_security_struct locking.
+ *
+ */
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec)
+{
+	sksec->nlbl_state = NLBL_UNSET;
+}
+
+/**
+ * selinux_netlbl_skbuff_getsid - Get the sid of a packet using NetLabel
+ * @skb: the packet
+ * @family: protocol family
+ * @type: NetLabel labeling protocol type
+ * @sid: the SID
+ *
+ * Description:
+ * Call the NetLabel mechanism to get the security attributes of the given
+ * packet and use those attributes to determine the correct context/SID to
+ * assign to the packet.  Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+				 u16 family,
+				 u32 *type,
+				 u32 *sid)
+{
+	int rc;
+	struct netlbl_lsm_secattr secattr;
+
+	if (!netlbl_enabled()) {
+		*sid = SECSID_NULL;
+		return 0;
+	}
+
+	netlbl_secattr_init(&secattr);
+	rc = netlbl_skbuff_getattr(skb, family, &secattr);
+	if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
+		rc = selinux_netlbl_sidlookup_cached(skb, &secattr, sid);
+	else
+		*sid = SECSID_NULL;
+	*type = secattr.type;
+	netlbl_secattr_destroy(&secattr);
+
+	return rc;
+}
+
+/**
+ * selinux_netlbl_skbuff_setsid - Set the NetLabel on a packet given a sid
+ * @skb: the packet
+ * @family: protocol family
+ * @sid: the SID
+ *
+ * Description:
+ * Call the NetLabel mechanism to set the label of a packet using @sid.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
+				 u16 family,
+				 u32 sid)
+{
+	int rc;
+	struct netlbl_lsm_secattr secattr_storage;
+	struct netlbl_lsm_secattr *secattr = NULL;
+	struct sock *sk;
+
+	/* if this is a locally generated packet, check to see if it is
+	 * already being labeled by its parent socket; if it is, just exit */
+	sk = skb_to_full_sk(skb);
+	if (sk != NULL) {
+		struct sk_security_struct *sksec = sk->sk_security;
+		if (sksec->nlbl_state != NLBL_REQSKB)
+			return 0;
+		secattr = selinux_netlbl_sock_getattr(sk, sid);
+	}
+	if (secattr == NULL) {
+		secattr = &secattr_storage;
+		netlbl_secattr_init(secattr);
+		rc = security_netlbl_sid_to_secattr(sid, secattr);
+		if (rc != 0)
+			goto skbuff_setsid_return;
+	}
+
+	rc = netlbl_skbuff_setattr(skb, family, secattr);
+
+skbuff_setsid_return:
+	if (secattr == &secattr_storage)
+		netlbl_secattr_destroy(secattr);
+	return rc;
+}
+
+/**
+ * selinux_netlbl_inet_conn_request - Label an incoming stream connection
+ * @req: incoming connection request socket
+ * @family: the address family
+ *
+ * Description:
+ * A new incoming connection request is represented by @req, we need to label
+ * the new request_sock here and the stack will ensure the on-the-wire label
+ * will get preserved when a full sock is created once the connection handshake
+ * is complete.  Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family)
+{
+	int rc;
+	struct netlbl_lsm_secattr secattr;
+
+	if (family != PF_INET)
+		return 0;
+
+	netlbl_secattr_init(&secattr);
+	rc = security_netlbl_sid_to_secattr(req->secid, &secattr);
+	if (rc != 0)
+		goto inet_conn_request_return;
+	rc = netlbl_req_setattr(req, &secattr);
+inet_conn_request_return:
+	netlbl_secattr_destroy(&secattr);
+	return rc;
+}
+
+/**
+ * selinux_netlbl_inet_csk_clone - Initialize the newly created sock
+ * @sk: the new sock
+ * @family: the address family
+ *
+ * Description:
+ * A new connection has been established using @sk, we've already labeled the
+ * socket via the request_sock struct in selinux_netlbl_inet_conn_request() but
+ * we need to set the NetLabel state here since we now have a sock structure.
+ *
+ */
+void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
+{
+	struct sk_security_struct *sksec = sk->sk_security;
+
+	if (family == PF_INET)
+		sksec->nlbl_state = NLBL_LABELED;
+	else
+		sksec->nlbl_state = NLBL_UNSET;
+}
+
+/**
+ * selinux_netlbl_socket_post_create - Label a socket using NetLabel
+ * @sk: the sock to label
+ * @family: protocol family
+ *
+ * Description:
+ * Attempt to label a socket using the NetLabel mechanism using the given
+ * SID.  Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
+{
+	int rc;
+	struct sk_security_struct *sksec = sk->sk_security;
+	struct netlbl_lsm_secattr *secattr;
+
+	if (family != PF_INET)
+		return 0;
+
+	secattr = selinux_netlbl_sock_genattr(sk);
+	if (secattr == NULL)
+		return -ENOMEM;
+	rc = netlbl_sock_setattr(sk, family, secattr);
+	switch (rc) {
+	case 0:
+		sksec->nlbl_state = NLBL_LABELED;
+		break;
+	case -EDESTADDRREQ:
+		sksec->nlbl_state = NLBL_REQSKB;
+		rc = 0;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * selinux_netlbl_sock_rcv_skb - Do an inbound access check using NetLabel
+ * @sksec: the sock's sk_security_struct
+ * @skb: the packet
+ * @family: protocol family
+ * @ad: the audit data
+ *
+ * Description:
+ * Fetch the NetLabel security attributes from @skb and perform an access check
+ * against the receiving socket.  Returns zero on success, negative values on
+ * error.
+ *
+ */
+int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+				struct sk_buff *skb,
+				u16 family,
+				struct common_audit_data *ad)
+{
+	int rc;
+	u32 nlbl_sid;
+	u32 perm;
+	struct netlbl_lsm_secattr secattr;
+
+	if (!netlbl_enabled())
+		return 0;
+
+	netlbl_secattr_init(&secattr);
+	rc = netlbl_skbuff_getattr(skb, family, &secattr);
+	if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
+		rc = selinux_netlbl_sidlookup_cached(skb, &secattr, &nlbl_sid);
+	else
+		nlbl_sid = SECINITSID_UNLABELED;
+	netlbl_secattr_destroy(&secattr);
+	if (rc != 0)
+		return rc;
+
+	switch (sksec->sclass) {
+	case SECCLASS_UDP_SOCKET:
+		perm = UDP_SOCKET__RECVFROM;
+		break;
+	case SECCLASS_TCP_SOCKET:
+		perm = TCP_SOCKET__RECVFROM;
+		break;
+	default:
+		perm = RAWIP_SOCKET__RECVFROM;
+	}
+
+	rc = avc_has_perm(sksec->sid, nlbl_sid, sksec->sclass, perm, ad);
+	if (rc == 0)
+		return 0;
+
+	if (nlbl_sid != SECINITSID_UNLABELED)
+		netlbl_skbuff_err(skb, rc, 0);
+	return rc;
+}
+
+/**
+ * selinux_netlbl_socket_setsockopt - Do not allow users to remove a NetLabel
+ * @sock: the socket
+ * @level: the socket level or protocol
+ * @optname: the socket option name
+ *
+ * Description:
+ * Check the setsockopt() call and if the user is trying to replace the IP
+ * options on a socket and a NetLabel is in place for the socket deny the
+ * access; otherwise allow the access.  Returns zero when the access is
+ * allowed, -EACCES when denied, and other negative values on error.
+ *
+ */
+int selinux_netlbl_socket_setsockopt(struct socket *sock,
+				     int level,
+				     int optname)
+{
+	int rc = 0;
+	struct sock *sk = sock->sk;
+	struct sk_security_struct *sksec = sk->sk_security;
+	struct netlbl_lsm_secattr secattr;
+
+	if (level == IPPROTO_IP && optname == IP_OPTIONS &&
+	    (sksec->nlbl_state == NLBL_LABELED ||
+	     sksec->nlbl_state == NLBL_CONNLABELED)) {
+		netlbl_secattr_init(&secattr);
+		lock_sock(sk);
+		/* call the netlabel function directly as we want to see the
+		 * on-the-wire label that is assigned via the socket's options
+		 * and not the cached netlabel/lsm attributes */
+		rc = netlbl_sock_getattr(sk, &secattr);
+		release_sock(sk);
+		if (rc == 0)
+			rc = -EACCES;
+		else if (rc == -ENOMSG)
+			rc = 0;
+		netlbl_secattr_destroy(&secattr);
+	}
+
+	return rc;
+}
+
+/**
+ * selinux_netlbl_socket_connect - Label a client-side socket on connect
+ * @sk: the socket to label
+ * @addr: the destination address
+ *
+ * Description:
+ * Attempt to label a connected socket with NetLabel using the given address.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
+{
+	int rc;
+	struct sk_security_struct *sksec = sk->sk_security;
+	struct netlbl_lsm_secattr *secattr;
+
+	if (sksec->nlbl_state != NLBL_REQSKB &&
+	    sksec->nlbl_state != NLBL_CONNLABELED)
+		return 0;
+
+	lock_sock(sk);
+
+	/* connected sockets are allowed to disconnect when the address family
+	 * is set to AF_UNSPEC, if that is what is happening we want to reset
+	 * the socket */
+	if (addr->sa_family == AF_UNSPEC) {
+		netlbl_sock_delattr(sk);
+		sksec->nlbl_state = NLBL_REQSKB;
+		rc = 0;
+		goto socket_connect_return;
+	}
+	secattr = selinux_netlbl_sock_genattr(sk);
+	if (secattr == NULL) {
+		rc = -ENOMEM;
+		goto socket_connect_return;
+	}
+	rc = netlbl_conn_setattr(sk, addr, secattr);
+	if (rc == 0)
+		sksec->nlbl_state = NLBL_CONNLABELED;
+
+socket_connect_return:
+	release_sock(sk);
+	return rc;
+}
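
selinux_netlbl_sock_genattr() above is an allocate-once cache: the first call builds the secattr from the socket SID and parks it on the sock, and every later call reuses the same pointer. A hedged userspace miniature of that pattern with invented types; none of the kernel locking or the real SID-to-secattr conversion is shown:

#include <stdio.h>
#include <stdlib.h>

struct secattr { unsigned int secid; };

struct sock_sec {
	unsigned int sid;
	struct secattr *cached;		/* NULL until first use */
};

static struct secattr *sock_genattr(struct sock_sec *ss)
{
	struct secattr *attr;

	if (ss->cached)			/* fast path: reuse the cache */
		return ss->cached;

	attr = malloc(sizeof(*attr));	/* slow path: build and cache */
	if (!attr)
		return NULL;
	attr->secid = ss->sid;		/* pretend SID -> secattr mapping */
	ss->cached = attr;
	return attr;
}

int main(void)
{
	struct sock_sec ss = { .sid = 42, .cached = NULL };

	printf("first:  %p\n", (void *)sock_genattr(&ss));
	printf("second: %p\n", (void *)sock_genattr(&ss)); /* same pointer */
	free(ss.cached);
	return 0;
}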
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
new file mode 100644
index 0000000..828fb6a
--- /dev/null
+++ b/security/selinux/netlink.c
@@ -0,0 +1,124 @@
+/*
+ * Netlink event notifications for SELinux.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/skbuff.h>
+#include <linux/selinux_netlink.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+
+#include "security.h"
+
+static struct sock *selnl;
+
+static int selnl_msglen(int msgtype)
+{
+	int ret = 0;
+
+	switch (msgtype) {
+	case SELNL_MSG_SETENFORCE:
+		ret = sizeof(struct selnl_msg_setenforce);
+		break;
+
+	case SELNL_MSG_POLICYLOAD:
+		ret = sizeof(struct selnl_msg_policyload);
+		break;
+
+	default:
+		BUG();
+	}
+	return ret;
+}
+
+static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *data)
+{
+	switch (msgtype) {
+	case SELNL_MSG_SETENFORCE: {
+		struct selnl_msg_setenforce *msg = nlmsg_data(nlh);
+
+		memset(msg, 0, len);
+		msg->val = *((int *)data);
+		break;
+	}
+
+	case SELNL_MSG_POLICYLOAD: {
+		struct selnl_msg_policyload *msg = nlmsg_data(nlh);
+
+		memset(msg, 0, len);
+		msg->seqno = *((u32 *)data);
+		break;
+	}
+
+	default:
+		BUG();
+	}
+}
+
+static void selnl_notify(int msgtype, void *data)
+{
+	int len;
+	sk_buff_data_t tmp;
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+
+	len = selnl_msglen(msgtype);
+
+	skb = nlmsg_new(len, GFP_USER);
+	if (!skb)
+		goto oom;
+
+	tmp = skb->tail;
+	nlh = nlmsg_put(skb, 0, 0, msgtype, len, 0);
+	if (!nlh)
+		goto out_kfree_skb;
+	selnl_add_payload(nlh, len, msgtype, data);
+	nlh->nlmsg_len = skb->tail - tmp;
+	NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
+	netlink_broadcast(selnl, skb, 0, SELNLGRP_AVC, GFP_USER);
+out:
+	return;
+
+out_kfree_skb:
+	kfree_skb(skb);
+oom:
+	printk(KERN_ERR "SELinux:  OOM in %s\n", __func__);
+	goto out;
+}
+
+void selnl_notify_setenforce(int val)
+{
+	selnl_notify(SELNL_MSG_SETENFORCE, &val);
+}
+
+void selnl_notify_policyload(u32 seqno)
+{
+	selnl_notify(SELNL_MSG_POLICYLOAD, &seqno);
+}
+
+static int __init selnl_init(void)
+{
+	struct netlink_kernel_cfg cfg = {
+		.groups	= SELNLGRP_MAX,
+		.flags	= NL_CFG_F_NONROOT_RECV,
+	};
+
+	selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg);
+	if (selnl == NULL)
+		panic("SELinux:  Cannot create netlink socket.");
+	return 0;
+}
+
+__initcall(selnl_init);
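
selnl_notify() above sizes its skb from the message type before building the payload. The switch below is a standalone sketch of that sizing step with stand-in structs, so the byte counts are illustrative only; the kernel BUG()s on an unknown type where this sketch returns -1:

#include <stdio.h>

/* stand-ins for the selnl_msg_* payload structs; layout is illustrative */
struct msg_setenforce { int val; };
struct msg_policyload { unsigned int seqno; };

enum { MSG_SETENFORCE, MSG_POLICYLOAD };

/* analogue of selnl_msglen(): payload size per message type */
static int msglen(int msgtype)
{
	switch (msgtype) {
	case MSG_SETENFORCE:
		return sizeof(struct msg_setenforce);
	case MSG_POLICYLOAD:
		return sizeof(struct msg_policyload);
	default:
		return -1;	/* the kernel BUG()s here instead */
	}
}

int main(void)
{
	printf("setenforce payload: %d bytes\n", msglen(MSG_SETENFORCE));
	printf("policyload payload: %d bytes\n", msglen(MSG_POLICYLOAD));
	return 0;
}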
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
new file mode 100644
index 0000000..da923f8
--- /dev/null
+++ b/security/selinux/netnode.c
@@ -0,0 +1,318 @@
+/*
+ * Network node table
+ *
+ * SELinux must keep a mapping of network nodes to labels/SIDs.  This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead since most of these queries happen on
+ * a per-packet basis.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ *
+ * This code is heavily based on the "netif" concept originally developed by
+ * James Morris <jmorris@redhat.com>
+ *   (see security/selinux/netif.c for more information)
+ *
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2007
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "netnode.h"
+#include "objsec.h"
+
+#define SEL_NETNODE_HASH_SIZE       256
+#define SEL_NETNODE_HASH_BKT_LIMIT   16
+
+struct sel_netnode_bkt {
+	unsigned int size;
+	struct list_head list;
+};
+
+struct sel_netnode {
+	struct netnode_security_struct nsec;
+
+	struct list_head list;
+	struct rcu_head rcu;
+};
+
+/* NOTE: we are using a combined hash table for both IPv4 and IPv6, the reason
+ * for this is that I suspect most users will not make heavy use of both
+ * address families at the same time so one table will usually end up wasted,
+ * if this becomes a problem we can always add a hash table for each address
+ * family later */
+
+static LIST_HEAD(sel_netnode_list);
+static DEFINE_SPINLOCK(sel_netnode_lock);
+static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
+
+/**
+ * sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table
+ * @addr: IPv4 address
+ *
+ * Description:
+ * This is the IPv4 hashing function for the node table; it returns
+ * the bucket number for the given IP address.
+ *
+ */
+static unsigned int sel_netnode_hashfn_ipv4(__be32 addr)
+{
+	/* at some point we should determine if the mismatch in byte order
+	 * affects the hash function dramatically */
+	return (addr & (SEL_NETNODE_HASH_SIZE - 1));
+}
+
+/**
+ * sel_netnode_hashfn_ipv6 - IPv6 hashing function for the node table
+ * @addr: IPv6 address
+ *
+ * Description:
+ * This is the IPv6 hashing function for the node table; it returns
+ * the bucket number for the given IP address.
+ *
+ */
+static unsigned int sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
+{
+	/* just hash the least significant 32 bits to keep things fast (they
+	 * are the most likely to be different anyway), we can revisit this
+	 * later if needed */
+	return (addr->s6_addr32[3] & (SEL_NETNODE_HASH_SIZE - 1));
+}
+
+/**
+ * sel_netnode_find - Search for a node record
+ * @addr: IP address
+ * @family: address family
+ *
+ * Description:
+ * Search the network node table and return the record matching @addr.  If an
+ * entry can not be found in the table return NULL.
+ *
+ */
+static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
+{
+	unsigned int idx;
+	struct sel_netnode *node;
+
+	switch (family) {
+	case PF_INET:
+		idx = sel_netnode_hashfn_ipv4(*(__be32 *)addr);
+		break;
+	case PF_INET6:
+		idx = sel_netnode_hashfn_ipv6(addr);
+		break;
+	default:
+		BUG();
+		return NULL;
+	}
+
+	list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
+		if (node->nsec.family == family)
+			switch (family) {
+			case PF_INET:
+				if (node->nsec.addr.ipv4 == *(__be32 *)addr)
+					return node;
+				break;
+			case PF_INET6:
+				if (ipv6_addr_equal(&node->nsec.addr.ipv6,
+						    addr))
+					return node;
+				break;
+			}
+
+	return NULL;
+}
+
+/**
+ * sel_netnode_insert - Insert a new node into the table
+ * @node: the new node record
+ *
+ * Description:
+ * Add a new node record to the network address hash table.
+ *
+ */
+static void sel_netnode_insert(struct sel_netnode *node)
+{
+	unsigned int idx;
+
+	switch (node->nsec.family) {
+	case PF_INET:
+		idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
+		break;
+	case PF_INET6:
+		idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	/* we need to impose a limit on the growth of the hash table so check
+	 * this bucket to make sure it is within the specified bounds */
+	list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
+	if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
+		struct sel_netnode *tail;
+		tail = list_entry(
+			rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
+						  lockdep_is_held(&sel_netnode_lock)),
+			struct sel_netnode, list);
+		list_del_rcu(&tail->list);
+		kfree_rcu(tail, rcu);
+	} else
+		sel_netnode_hash[idx].size++;
+}
+
+/**
+ * sel_netnode_sid_slow - Lookup the SID of a network address using the policy
+ * @addr: the IP address
+ * @family: the address family
+ * @sid: node SID
+ *
+ * Description:
+ * This function determines the SID of a network address by querying the
+ * security policy.  The result is added to the network address table to
+ * speed up future queries.  Returns zero on success, negative values on
+ * failure.
+ *
+ */
+static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
+{
+	int ret = -ENOMEM;
+	struct sel_netnode *node;
+	struct sel_netnode *new = NULL;
+
+	spin_lock_bh(&sel_netnode_lock);
+	node = sel_netnode_find(addr, family);
+	if (node != NULL) {
+		*sid = node->nsec.sid;
+		spin_unlock_bh(&sel_netnode_lock);
+		return 0;
+	}
+	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+	if (new == NULL)
+		goto out;
+	switch (family) {
+	case PF_INET:
+		ret = security_node_sid(PF_INET,
+					addr, sizeof(struct in_addr), sid);
+		new->nsec.addr.ipv4 = *(__be32 *)addr;
+		break;
+	case PF_INET6:
+		ret = security_node_sid(PF_INET6,
+					addr, sizeof(struct in6_addr), sid);
+		new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
+		break;
+	default:
+		BUG();
+		ret = -EINVAL;
+	}
+	if (ret != 0)
+		goto out;
+
+	new->nsec.family = family;
+	new->nsec.sid = *sid;
+	sel_netnode_insert(new);
+
+out:
+	spin_unlock_bh(&sel_netnode_lock);
+	if (unlikely(ret)) {
+		printk(KERN_WARNING
+		       "SELinux: failure in sel_netnode_sid_slow(),"
+		       " unable to determine network node label\n");
+		kfree(new);
+	}
+	return ret;
+}
+
+/**
+ * sel_netnode_sid - Lookup the SID of a network address
+ * @addr: the IP address
+ * @family: the address family
+ * @sid: node SID
+ *
+ * Description:
+ * This function determines the SID of a network address using the fastest
+ * method possible.  First the address table is queried, but if an entry
+ * can't be found then the policy is queried and the result is added to the
+ * table to speed up future queries.  Returns zero on success, negative values
+ * on failure.
+ *
+ */
+int sel_netnode_sid(void *addr, u16 family, u32 *sid)
+{
+	struct sel_netnode *node;
+
+	rcu_read_lock();
+	node = sel_netnode_find(addr, family);
+	if (node != NULL) {
+		*sid = node->nsec.sid;
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+
+	return sel_netnode_sid_slow(addr, family, sid);
+}
+
+/**
+ * sel_netnode_flush - Flush the entire network address table
+ *
+ * Description:
+ * Remove all entries from the network address table.
+ *
+ */
+void sel_netnode_flush(void)
+{
+	unsigned int idx;
+	struct sel_netnode *node, *node_tmp;
+
+	spin_lock_bh(&sel_netnode_lock);
+	for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++) {
+		list_for_each_entry_safe(node, node_tmp,
+					 &sel_netnode_hash[idx].list, list) {
+				list_del_rcu(&node->list);
+				kfree_rcu(node, rcu);
+		}
+		sel_netnode_hash[idx].size = 0;
+	}
+	spin_unlock_bh(&sel_netnode_lock);
+}
+
+static __init int sel_netnode_init(void)
+{
+	int iter;
+
+	if (!selinux_enabled)
+		return 0;
+
+	for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
+		INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
+		sel_netnode_hash[iter].size = 0;
+	}
+
+	return 0;
+}
+
+__initcall(sel_netnode_init);
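
sel_netnode_insert() above caps each bucket at SEL_NETNODE_HASH_BKT_LIMIT entries and evicts the list tail once the cap is hit, so the cache can never outgrow SEL_NETNODE_HASH_SIZE * SEL_NETNODE_HASH_BKT_LIMIT (4096) entries. A single-bucket sketch of that eviction, with a plain array standing in for the RCU-protected list:

#include <stdio.h>
#include <string.h>

#define BKT_LIMIT 4		/* stand-in for SEL_NETNODE_HASH_BKT_LIMIT */

struct bucket {
	unsigned int size;
	unsigned int entries[BKT_LIMIT];	/* newest first */
};

static void bucket_insert(struct bucket *b, unsigned int sid)
{
	if (b->size == BKT_LIMIT)
		b->size--;	/* drop the oldest (tail) entry */
	/* shift everything down one slot and put the new entry first */
	memmove(&b->entries[1], &b->entries[0],
		b->size * sizeof(b->entries[0]));
	b->entries[0] = sid;
	b->size++;
}

int main(void)
{
	struct bucket b = { 0 };
	unsigned int i, sid;

	for (sid = 1; sid <= 6; sid++)
		bucket_insert(&b, sid);
	for (i = 0; i < b.size; i++)
		printf("entry %u: sid %u\n", i, b.entries[i]);
	return 0;	/* sids 6,5,4,3 remain; 1 and 2 were evicted */
}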
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
new file mode 100644
index 0000000..3311cc3
--- /dev/null
+++ b/security/selinux/netport.c
@@ -0,0 +1,252 @@
+/*
+ * Network port table
+ *
+ * SELinux must keep a mapping of network ports to labels/SIDs.  This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ *
+ * This code is heavily based on the "netif" concept originally developed by
+ * James Morris <jmorris@redhat.com>
+ *   (see security/selinux/netif.c for more information)
+ *
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "netport.h"
+#include "objsec.h"
+
+#define SEL_NETPORT_HASH_SIZE       256
+#define SEL_NETPORT_HASH_BKT_LIMIT   16
+
+struct sel_netport_bkt {
+	int size;
+	struct list_head list;
+};
+
+struct sel_netport {
+	struct netport_security_struct psec;
+
+	struct list_head list;
+	struct rcu_head rcu;
+};
+
+static LIST_HEAD(sel_netport_list);
+static DEFINE_SPINLOCK(sel_netport_lock);
+static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE];
+
+/**
+ * sel_netport_hashfn - Hashing function for the port table
+ * @pnum: port number
+ *
+ * Description:
+ * This is the hashing function for the port table, it returns the bucket
+ * number for the given port.
+ *
+ */
+static unsigned int sel_netport_hashfn(u16 pnum)
+{
+	return (pnum & (SEL_NETPORT_HASH_SIZE - 1));
+}
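+
+/*
+ * A worked example, given the SEL_NETPORT_HASH_SIZE of 256 above: the
+ * mask is 0xff, so TCP port 8080 (0x1f90) hashes to bucket 0x90 (144).
+ * The bitwise AND only spreads entries evenly because the table size
+ * is a power of two; any other size would need a modulo instead.
+ */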
+
+/**
+ * sel_netport_find - Search for a port record
+ * @protocol: protocol
+ * @pnum: port number
+ *
+ * Description:
+ * Search the network port table and return the matching record.  If an entry
+ * cannot be found in the table, return NULL.
+ *
+ */
+static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum)
+{
+	unsigned int idx;
+	struct sel_netport *port;
+
+	idx = sel_netport_hashfn(pnum);
+	list_for_each_entry_rcu(port, &sel_netport_hash[idx].list, list)
+		if (port->psec.port == pnum && port->psec.protocol == protocol)
+			return port;
+
+	return NULL;
+}
+
+/**
+ * sel_netport_insert - Insert a new port into the table
+ * @port: the new port record
+ *
+ * Description:
+ * Add a new port record to the network port hash table.
+ *
+ */
+static void sel_netport_insert(struct sel_netport *port)
+{
+	unsigned int idx;
+
+	/* we need to impose a limit on the growth of the hash table so check
+	 * this bucket to make sure it is within the specified bounds */
+	idx = sel_netport_hashfn(port->psec.port);
+	list_add_rcu(&port->list, &sel_netport_hash[idx].list);
+	if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
+		struct sel_netport *tail;
+		tail = list_entry(
+			rcu_dereference_protected(
+				sel_netport_hash[idx].list.prev,
+				lockdep_is_held(&sel_netport_lock)),
+			struct sel_netport, list);
+		list_del_rcu(&tail->list);
+		kfree_rcu(tail, rcu);
+	} else
+		sel_netport_hash[idx].size++;
+}
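+
+/*
+ * Note on the bucket limit above: new entries are added at the head of
+ * the bucket by list_add_rcu(), so once a bucket already holds
+ * SEL_NETPORT_HASH_BKT_LIMIT (16) entries the tail -- the oldest
+ * insertion -- is unlinked and freed.  The cache is therefore bounded
+ * at 256 * 16 = 4096 records.
+ */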
+
+/**
+ * sel_netport_sid_slow - Lookup the SID of a network port using the policy
+ * @protocol: protocol
+ * @pnum: port
+ * @sid: port SID
+ *
+ * Description:
+ * This function determines the SID of a network port by querying the security
+ * policy.  The result is added to the network port table to speed up future
+ * queries.  Returns zero on success, negative values on failure.
+ *
+ */
+static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
+{
+	int ret = -ENOMEM;
+	struct sel_netport *port;
+	struct sel_netport *new = NULL;
+
+	spin_lock_bh(&sel_netport_lock);
+	port = sel_netport_find(protocol, pnum);
+	if (port != NULL) {
+		*sid = port->psec.sid;
+		spin_unlock_bh(&sel_netport_lock);
+		return 0;
+	}
+	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+	if (new == NULL)
+		goto out;
+	ret = security_port_sid(protocol, pnum, sid);
+	if (ret != 0)
+		goto out;
+
+	new->psec.port = pnum;
+	new->psec.protocol = protocol;
+	new->psec.sid = *sid;
+	sel_netport_insert(new);
+
+out:
+	spin_unlock_bh(&sel_netport_lock);
+	if (unlikely(ret)) {
+		printk(KERN_WARNING
+		       "SELinux: failure in sel_netport_sid_slow(),"
+		       " unable to determine network port label\n");
+		kfree(new);
+	}
+	return ret;
+}
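+
+/*
+ * A note on the GFP_ATOMIC allocation above: the lookup and insert run
+ * under sel_netport_lock with bottom halves disabled (spin_lock_bh),
+ * so a sleeping GFP_KERNEL allocation would not be safe here.
+ */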
+
+/**
+ * sel_netport_sid - Lookup the SID of a network port
+ * @protocol: protocol
+ * @pnum: port
+ * @sid: port SID
+ *
+ * Description:
+ * This function determines the SID of a network port using the fastest method
+ * possible.  First the port table is queried, but if an entry can't be found
+ * then the policy is queried and the result is added to the table to speed up
+ * future queries.  Returns zero on success, negative values on failure.
+ *
+ */
+int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid)
+{
+	struct sel_netport *port;
+
+	rcu_read_lock();
+	port = sel_netport_find(protocol, pnum);
+	if (port != NULL) {
+		*sid = port->psec.sid;
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+
+	return sel_netport_sid_slow(protocol, pnum, sid);
+}
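+
+/*
+ * A hypothetical caller sketch (IPPROTO_TCP, port 80, task_sid and the
+ * audit data "ad" are illustrative values, not taken from this file):
+ *
+ *	u32 sid;
+ *	int rc = sel_netport_sid(IPPROTO_TCP, 80, &sid);
+ *	if (rc == 0)
+ *		rc = avc_has_perm(task_sid, sid, SECCLASS_TCP_SOCKET,
+ *				  TCP_SOCKET__NAME_BIND, &ad);
+ *
+ * The common case is a hit in the RCU-protected cache; only a miss
+ * falls through to sel_netport_sid_slow() and the policy lookup.
+ */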
+
+/**
+ * sel_netport_flush - Flush the entire network port table
+ *
+ * Description:
+ * Remove all entries from the network port table.
+ *
+ */
+void sel_netport_flush(void)
+{
+	unsigned int idx;
+	struct sel_netport *port, *port_tmp;
+
+	spin_lock_bh(&sel_netport_lock);
+	for (idx = 0; idx < SEL_NETPORT_HASH_SIZE; idx++) {
+		list_for_each_entry_safe(port, port_tmp,
+					 &sel_netport_hash[idx].list, list) {
+			list_del_rcu(&port->list);
+			kfree_rcu(port, rcu);
+		}
+		sel_netport_hash[idx].size = 0;
+	}
+	spin_unlock_bh(&sel_netport_lock);
+}
+
+static __init int sel_netport_init(void)
+{
+	int iter;
+
+	if (!selinux_enabled)
+		return 0;
+
+	for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
+		INIT_LIST_HEAD(&sel_netport_hash[iter].list);
+		sel_netport_hash[iter].size = 0;
+	}
+
+	return 0;
+}
+
+__initcall(sel_netport_init);
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
new file mode 100644
index 0000000..2bbb418
--- /dev/null
+++ b/security/selinux/nlmsgtab.c
@@ -0,0 +1,192 @@
+/*
+ * Netlink message type permission tables, for user generated messages.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/if.h>
+#include <linux/inet_diag.h>
+#include <linux/xfrm.h>
+#include <linux/audit.h>
+#include <linux/sock_diag.h>
+
+#include "flask.h"
+#include "av_permissions.h"
+#include "security.h"
+
+struct nlmsg_perm {
+	u16	nlmsg_type;
+	u32	perm;
+};
+
+static struct nlmsg_perm nlmsg_route_perms[] =
+{
+	{ RTM_NEWLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETLINK,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_SETLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_NEWADDR,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELADDR,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETADDR,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWRULE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELRULE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETRULE,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWACTION,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELACTION,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETACTION,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWPREFIX,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETMULTICAST,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_GETANYCAST,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_GETNEIGHTBL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_SETNEIGHTBL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_NEWADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_GETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_SETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_NEWNETCONF,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETNETCONF,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWMDB,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELMDB,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_GETMDB,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWNSID,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+	{ RTM_DELNSID,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_GETNSID,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+};
+
+static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
+{
+	{ TCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+	{ DCCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+	{ SOCK_DIAG_BY_FAMILY,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+};
+
+static struct nlmsg_perm nlmsg_xfrm_perms[] =
+{
+	{ XFRM_MSG_NEWSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_DELSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_GETSA,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_NEWPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_DELPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_GETPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_ALLOCSPI,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_ACQUIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_EXPIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_UPDPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_UPDSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_POLEXPIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_FLUSHSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_FLUSHPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_NEWAE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_GETAE,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_REPORT,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_MIGRATE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_NEWSADINFO,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_GETSADINFO,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_NEWSPDINFO,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_GETSPDINFO,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_MAPPING,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+};
+
+static struct nlmsg_perm nlmsg_audit_perms[] =
+{
+	{ AUDIT_GET,		NETLINK_AUDIT_SOCKET__NLMSG_READ     },
+	{ AUDIT_SET,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
+	{ AUDIT_LIST,		NETLINK_AUDIT_SOCKET__NLMSG_READPRIV },
+	{ AUDIT_ADD,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
+	{ AUDIT_DEL,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
+	{ AUDIT_LIST_RULES,	NETLINK_AUDIT_SOCKET__NLMSG_READPRIV },
+	{ AUDIT_ADD_RULE,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
+	{ AUDIT_DEL_RULE,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
+	{ AUDIT_USER,		NETLINK_AUDIT_SOCKET__NLMSG_RELAY    },
+	{ AUDIT_SIGNAL_INFO,	NETLINK_AUDIT_SOCKET__NLMSG_READ     },
+	{ AUDIT_TRIM,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
+	{ AUDIT_MAKE_EQUIV,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
+	{ AUDIT_TTY_GET,	NETLINK_AUDIT_SOCKET__NLMSG_READ     },
+	{ AUDIT_TTY_SET,	NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT },
+	{ AUDIT_GET_FEATURE,	NETLINK_AUDIT_SOCKET__NLMSG_READ     },
+	{ AUDIT_SET_FEATURE,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
+};
+
+
+static int nlmsg_perm(u16 nlmsg_type, u32 *perm, struct nlmsg_perm *tab, size_t tabsize)
+{
+	int i, err = -EINVAL;
+
+	for (i = 0; i < tabsize/sizeof(struct nlmsg_perm); i++)
+		if (nlmsg_type == tab[i].nlmsg_type) {
+			*perm = tab[i].perm;
+			err = 0;
+			break;
+		}
+
+	return err;
+}
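+
+/*
+ * nlmsg_perm() is a plain O(n) scan; with tables of at most a few
+ * dozen entries this is cheaper than maintaining an index.  For
+ * example, looking up RTM_GETLINK in nlmsg_route_perms above yields
+ * NETLINK_ROUTE_SOCKET__NLMSG_READ.
+ */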
+
+int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
+{
+	int err = 0;
+
+	switch (sclass) {
+	case SECCLASS_NETLINK_ROUTE_SOCKET:
+		/* RTM_MAX always points to RTM_SETxxxx, i.e. RTM_NEWxxx + 3 */
+		BUILD_BUG_ON(RTM_MAX != (RTM_NEWNSID + 3));
+		err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
+				 sizeof(nlmsg_route_perms));
+		break;
+
+	case SECCLASS_NETLINK_TCPDIAG_SOCKET:
+		err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms,
+				 sizeof(nlmsg_tcpdiag_perms));
+		break;
+
+	case SECCLASS_NETLINK_XFRM_SOCKET:
+		BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
+		err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
+				 sizeof(nlmsg_xfrm_perms));
+		break;
+
+	case SECCLASS_NETLINK_AUDIT_SOCKET:
+		if ((nlmsg_type >= AUDIT_FIRST_USER_MSG &&
+		     nlmsg_type <= AUDIT_LAST_USER_MSG) ||
+		    (nlmsg_type >= AUDIT_FIRST_USER_MSG2 &&
+		     nlmsg_type <= AUDIT_LAST_USER_MSG2)) {
+			*perm = NETLINK_AUDIT_SOCKET__NLMSG_RELAY;
+		} else {
+			err = nlmsg_perm(nlmsg_type, perm, nlmsg_audit_perms,
+					 sizeof(nlmsg_audit_perms));
+		}
+		break;
+
+	/* No messaging from userspace, or class unknown/unhandled */
+	default:
+		err = -ENOENT;
+		break;
+	}
+
+	return err;
+}
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
new file mode 100644
index 0000000..c02da25
--- /dev/null
+++ b/security/selinux/selinuxfs.c
@@ -0,0 +1,1885 @@
+/* Updated: Karl MacMillan <kmacmillan@tresys.com>
+ *
+ *	Added conditional policy language extensions
+ *
+ *  Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ *	Added support for the policy capability bitmap
+ *
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/security.h>
+#include <linux/major.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/audit.h>
+#include <linux/uaccess.h>
+#include <linux/kobject.h>
+#include <linux/ctype.h>
+
+/* selinuxfs pseudo filesystem for exporting the security policy API.
+   Based on the proc code and the fs/nfsd/nfsctl.c code. */
+
+#include "flask.h"
+#include "avc.h"
+#include "avc_ss.h"
+#include "security.h"
+#include "objsec.h"
+#include "conditional.h"
+
+/* Policy capability filenames */
+static char *policycap_names[] = {
+	"network_peer_controls",
+	"open_perms",
+	"redhat1",
+	"always_check_network"
+};
+
+unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
+
+static int __init checkreqprot_setup(char *str)
+{
+	unsigned long checkreqprot;
+	if (!kstrtoul(str, 0, &checkreqprot))
+		selinux_checkreqprot = checkreqprot ? 1 : 0;
+	return 1;
+}
+__setup("checkreqprot=", checkreqprot_setup);
+
+static DEFINE_MUTEX(sel_mutex);
+
+/* global data for booleans */
+static struct dentry *bool_dir;
+static int bool_num;
+static char **bool_pending_names;
+static int *bool_pending_values;
+
+/* global data for classes */
+static struct dentry *class_dir;
+static unsigned long last_class_ino;
+
+static char policy_opened;
+
+/* global data for policy capabilities */
+static struct dentry *policycap_dir;
+
+/* Check whether a task is allowed to use a security operation. */
+static int task_has_security(struct task_struct *tsk,
+			     u32 perms)
+{
+	const struct task_security_struct *tsec;
+	u32 sid = 0;
+
+	rcu_read_lock();
+	tsec = __task_cred(tsk)->security;
+	if (tsec)
+		sid = tsec->sid;
+	rcu_read_unlock();
+	if (!tsec)
+		return -EACCES;
+
+	return avc_has_perm(sid, SECINITSID_SECURITY,
+			    SECCLASS_SECURITY, perms, NULL);
+}
+
+enum sel_inos {
+	SEL_ROOT_INO = 2,
+	SEL_LOAD,	/* load policy */
+	SEL_ENFORCE,	/* get or set enforcing status */
+	SEL_CONTEXT,	/* validate context */
+	SEL_ACCESS,	/* compute access decision */
+	SEL_CREATE,	/* compute create labeling decision */
+	SEL_RELABEL,	/* compute relabeling decision */
+	SEL_USER,	/* compute reachable user contexts */
+	SEL_POLICYVERS,	/* return policy version for this kernel */
+	SEL_COMMIT_BOOLS, /* commit new boolean values */
+	SEL_MLS,	/* return if MLS policy is enabled */
+	SEL_DISABLE,	/* disable SELinux until next reboot */
+	SEL_MEMBER,	/* compute polyinstantiation membership decision */
+	SEL_CHECKREQPROT, /* check requested protection, not the kernel-applied one */
+	SEL_COMPAT_NET,	/* whether to use old compat network packet controls */
+	SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
+	SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
+	SEL_STATUS,	/* export current status using mmap() */
+	SEL_POLICY,	/* allow userspace to read the in kernel policy */
+	SEL_INO_NEXT,	/* The next inode number to use */
+};
+
+static unsigned long sel_last_ino = SEL_INO_NEXT - 1;
+
+#define SEL_INITCON_INO_OFFSET		0x01000000
+#define SEL_BOOL_INO_OFFSET		0x02000000
+#define SEL_CLASS_INO_OFFSET		0x04000000
+#define SEL_POLICYCAP_INO_OFFSET	0x08000000
+#define SEL_INO_MASK			0x00ffffff
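+
+/*
+ * Inode number layout: the top byte selects the namespace and the low
+ * 24 bits (SEL_INO_MASK) carry the index.  For example, boolean number
+ * 5 is assigned inode 0x02000005 and initial SID 3 inode 0x01000003.
+ */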
+
+#define TMPBUFLEN	12
+static ssize_t sel_read_enforce(struct file *filp, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char tmpbuf[TMPBUFLEN];
+	ssize_t length;
+
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%d", selinux_enforcing);
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+
+{
+	char *page = NULL;
+	ssize_t length;
+	int new_value;
+
+	length = -ENOMEM;
+	if (count >= PAGE_SIZE)
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
+	page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		goto out;
+
+	length = -EFAULT;
+	if (copy_from_user(page, buf, count))
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(page, "%d", &new_value) != 1)
+		goto out;
+
+	if (new_value != selinux_enforcing) {
+		length = task_has_security(current, SECURITY__SETENFORCE);
+		if (length)
+			goto out;
+		audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
+			"enforcing=%d old_enforcing=%d auid=%u ses=%u",
+			new_value, selinux_enforcing,
+			from_kuid(&init_user_ns, audit_get_loginuid(current)),
+			audit_get_sessionid(current));
+		selinux_enforcing = new_value;
+		if (selinux_enforcing)
+			avc_ss_reset(0);
+		selnl_notify_setenforce(selinux_enforcing);
+		selinux_status_update_setenforce(selinux_enforcing);
+	}
+	length = count;
+out:
+	free_page((unsigned long) page);
+	return length;
+}
+#else
+#define sel_write_enforce NULL
+#endif
+
+static const struct file_operations sel_enforce_ops = {
+	.read		= sel_read_enforce,
+	.write		= sel_write_enforce,
+	.llseek		= generic_file_llseek,
+};
+
+static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	char tmpbuf[TMPBUFLEN];
+	ssize_t length;
+	ino_t ino = file_inode(filp)->i_ino;
+	int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
+		security_get_reject_unknown() : !security_get_allow_unknown();
+
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%d", handle_unknown);
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_handle_unknown_ops = {
+	.read		= sel_read_handle_unknown,
+	.llseek		= generic_file_llseek,
+};
+
+static int sel_open_handle_status(struct inode *inode, struct file *filp)
+{
+	struct page    *status = selinux_kernel_status_page();
+
+	if (!status)
+		return -ENOMEM;
+
+	filp->private_data = status;
+
+	return 0;
+}
+
+static ssize_t sel_read_handle_status(struct file *filp, char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	struct page    *status = filp->private_data;
+
+	BUG_ON(!status);
+
+	return simple_read_from_buffer(buf, count, ppos,
+				       page_address(status),
+				       sizeof(struct selinux_kernel_status));
+}
+
+static int sel_mmap_handle_status(struct file *filp,
+				  struct vm_area_struct *vma)
+{
+	struct page    *status = filp->private_data;
+	unsigned long	size = vma->vm_end - vma->vm_start;
+
+	BUG_ON(!status);
+
+	/* only allow mapping the single page at the head */
+	if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
+		return -EIO;
+	/* disallow writable mapping */
+	if (vma->vm_flags & VM_WRITE)
+		return -EPERM;
+	/* disallow mprotect() from later making it writable */
+	vma->vm_flags &= ~VM_MAYWRITE;
+
+	return remap_pfn_range(vma, vma->vm_start,
+			       page_to_pfn(status),
+			       size, vma->vm_page_prot);
+}
+
+static const struct file_operations sel_handle_status_ops = {
+	.open		= sel_open_handle_status,
+	.read		= sel_read_handle_status,
+	.mmap		= sel_mmap_handle_status,
+	.llseek		= generic_file_llseek,
+};
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+static ssize_t sel_write_disable(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+
+{
+	char *page = NULL;
+	ssize_t length;
+	int new_value;
+
+	length = -ENOMEM;
+	if (count >= PAGE_SIZE)
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
+	page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		goto out;
+
+	length = -EFAULT;
+	if (copy_from_user(page, buf, count))
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(page, "%d", &new_value) != 1)
+		goto out;
+
+	if (new_value) {
+		length = selinux_disable();
+		if (length)
+			goto out;
+		audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
+			"selinux=0 auid=%u ses=%u",
+			from_kuid(&init_user_ns, audit_get_loginuid(current)),
+			audit_get_sessionid(current));
+	}
+
+	length = count;
+out:
+	free_page((unsigned long) page);
+	return length;
+}
+#else
+#define sel_write_disable NULL
+#endif
+
+static const struct file_operations sel_disable_ops = {
+	.write		= sel_write_disable,
+	.llseek		= generic_file_llseek,
+};
+
+static ssize_t sel_read_policyvers(struct file *filp, char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	char tmpbuf[TMPBUFLEN];
+	ssize_t length;
+
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%u", POLICYDB_VERSION_MAX);
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_policyvers_ops = {
+	.read		= sel_read_policyvers,
+	.llseek		= generic_file_llseek,
+};
+
+/* declaration for sel_write_load */
+static int sel_make_bools(void);
+static int sel_make_classes(void);
+static int sel_make_policycap(void);
+
+/* declaration for sel_make_class_dirs */
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
+			unsigned long *ino);
+
+static ssize_t sel_read_mls(struct file *filp, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char tmpbuf[TMPBUFLEN];
+	ssize_t length;
+
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
+			   security_mls_enabled());
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_mls_ops = {
+	.read		= sel_read_mls,
+	.llseek		= generic_file_llseek,
+};
+
+struct policy_load_memory {
+	size_t len;
+	void *data;
+};
+
+static int sel_open_policy(struct inode *inode, struct file *filp)
+{
+	struct policy_load_memory *plm = NULL;
+	int rc;
+
+	BUG_ON(filp->private_data);
+
+	mutex_lock(&sel_mutex);
+
+	rc = task_has_security(current, SECURITY__READ_POLICY);
+	if (rc)
+		goto err;
+
+	rc = -EBUSY;
+	if (policy_opened)
+		goto err;
+
+	rc = -ENOMEM;
+	plm = kzalloc(sizeof(*plm), GFP_KERNEL);
+	if (!plm)
+		goto err;
+
+	if (i_size_read(inode) != security_policydb_len()) {
+		mutex_lock(&inode->i_mutex);
+		i_size_write(inode, security_policydb_len());
+		mutex_unlock(&inode->i_mutex);
+	}
+
+	rc = security_read_policy(&plm->data, &plm->len);
+	if (rc)
+		goto err;
+
+	policy_opened = 1;
+
+	filp->private_data = plm;
+
+	mutex_unlock(&sel_mutex);
+
+	return 0;
+err:
+	mutex_unlock(&sel_mutex);
+
+	if (plm)
+		vfree(plm->data);
+	kfree(plm);
+	return rc;
+}
+
+static int sel_release_policy(struct inode *inode, struct file *filp)
+{
+	struct policy_load_memory *plm = filp->private_data;
+
+	BUG_ON(!plm);
+
+	policy_opened = 0;
+
+	vfree(plm->data);
+	kfree(plm);
+
+	return 0;
+}
+
+static ssize_t sel_read_policy(struct file *filp, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	struct policy_load_memory *plm = filp->private_data;
+	int ret;
+
+	mutex_lock(&sel_mutex);
+
+	ret = task_has_security(current, SECURITY__READ_POLICY);
+	if (ret)
+		goto out;
+
+	ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
+out:
+	mutex_unlock(&sel_mutex);
+	return ret;
+}
+
+static int sel_mmap_policy_fault(struct vm_area_struct *vma,
+				 struct vm_fault *vmf)
+{
+	struct policy_load_memory *plm = vma->vm_file->private_data;
+	unsigned long offset;
+	struct page *page;
+
+	if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
+		return VM_FAULT_SIGBUS;
+
+	offset = vmf->pgoff << PAGE_SHIFT;
+	if (offset >= roundup(plm->len, PAGE_SIZE))
+		return VM_FAULT_SIGBUS;
+
+	page = vmalloc_to_page(plm->data + offset);
+	get_page(page);
+
+	vmf->page = page;
+
+	return 0;
+}
+
+static const struct vm_operations_struct sel_mmap_policy_ops = {
+	.fault = sel_mmap_policy_fault,
+	.page_mkwrite = sel_mmap_policy_fault,
+};
+
+static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_SHARED) {
+		/* do not allow mprotect to make mapping writable */
+		vma->vm_flags &= ~VM_MAYWRITE;
+
+		if (vma->vm_flags & VM_WRITE)
+			return -EACCES;
+	}
+
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = &sel_mmap_policy_ops;
+
+	return 0;
+}
+
+static const struct file_operations sel_policy_ops = {
+	.open		= sel_open_policy,
+	.read		= sel_read_policy,
+	.mmap		= sel_mmap_policy,
+	.release	= sel_release_policy,
+	.llseek		= generic_file_llseek,
+};
+
+static ssize_t sel_write_load(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+
+{
+	ssize_t length;
+	void *data = NULL;
+
+	mutex_lock(&sel_mutex);
+
+	length = task_has_security(current, SECURITY__LOAD_POLICY);
+	if (length)
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -EFBIG;
+	if (count > 64 * 1024 * 1024)
+		goto out;
+
+	length = -ENOMEM;
+	data = vmalloc(count);
+	if (!data)
+		goto out;
+
+	length = -EFAULT;
+	if (copy_from_user(data, buf, count) != 0)
+		goto out;
+
+	length = security_load_policy(data, count);
+	if (length)
+		goto out;
+
+	length = sel_make_bools();
+	if (length)
+		goto out1;
+
+	length = sel_make_classes();
+	if (length)
+		goto out1;
+
+	length = sel_make_policycap();
+	if (length)
+		goto out1;
+
+	length = count;
+
+out1:
+	audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
+		"policy loaded auid=%u ses=%u",
+		from_kuid(&init_user_ns, audit_get_loginuid(current)),
+		audit_get_sessionid(current));
+out:
+	mutex_unlock(&sel_mutex);
+	vfree(data);
+	return length;
+}
+
+static const struct file_operations sel_load_ops = {
+	.write		= sel_write_load,
+	.llseek		= generic_file_llseek,
+};
+
+static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
+{
+	char *canon = NULL;
+	u32 sid, len;
+	ssize_t length;
+
+	length = task_has_security(current, SECURITY__CHECK_CONTEXT);
+	if (length)
+		goto out;
+
+	length = security_context_to_sid(buf, size, &sid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_sid_to_context(sid, &canon, &len);
+	if (length)
+		goto out;
+
+	length = -ERANGE;
+	if (len > SIMPLE_TRANSACTION_LIMIT) {
+		printk(KERN_ERR "SELinux: %s:  context size (%u) exceeds "
+			"payload max\n", __func__, len);
+		goto out;
+	}
+
+	memcpy(buf, canon, len);
+	length = len;
+out:
+	kfree(canon);
+	return length;
+}
+
+static ssize_t sel_read_checkreqprot(struct file *filp, char __user *buf,
+				     size_t count, loff_t *ppos)
+{
+	char tmpbuf[TMPBUFLEN];
+	ssize_t length;
+
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%u", selinux_checkreqprot);
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	char *page = NULL;
+	ssize_t length;
+	unsigned int new_value;
+
+	length = task_has_security(current, SECURITY__SETCHECKREQPROT);
+	if (length)
+		goto out;
+
+	length = -ENOMEM;
+	if (count >= PAGE_SIZE)
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
+	page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		goto out;
+
+	length = -EFAULT;
+	if (copy_from_user(page, buf, count))
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(page, "%u", &new_value) != 1)
+		goto out;
+
+	selinux_checkreqprot = new_value ? 1 : 0;
+	length = count;
+out:
+	free_page((unsigned long) page);
+	return length;
+}
+static const struct file_operations sel_checkreqprot_ops = {
+	.read		= sel_read_checkreqprot,
+	.write		= sel_write_checkreqprot,
+	.llseek		= generic_file_llseek,
+};
+
+/*
+ * Remaining nodes use transaction-based I/O methods, like nfsd/nfsctl.c
+ */
+static ssize_t sel_write_access(struct file *file, char *buf, size_t size);
+static ssize_t sel_write_create(struct file *file, char *buf, size_t size);
+static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size);
+static ssize_t sel_write_user(struct file *file, char *buf, size_t size);
+static ssize_t sel_write_member(struct file *file, char *buf, size_t size);
+
+static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+	[SEL_ACCESS] = sel_write_access,
+	[SEL_CREATE] = sel_write_create,
+	[SEL_RELABEL] = sel_write_relabel,
+	[SEL_USER] = sel_write_user,
+	[SEL_MEMBER] = sel_write_member,
+	[SEL_CONTEXT] = sel_write_context,
+};
+
+static ssize_t selinux_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
+{
+	ino_t ino = file_inode(file)->i_ino;
+	char *data;
+	ssize_t rv;
+
+	if (ino >= ARRAY_SIZE(write_op) || !write_op[ino])
+		return -EINVAL;
+
+	data = simple_transaction_get(file, buf, size);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	rv = write_op[ino](file, data, size);
+	if (rv > 0) {
+		simple_transaction_set(file, rv);
+		rv = size;
+	}
+	return rv;
+}
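+
+/*
+ * A rough userspace usage sketch, assuming the usual /sys/fs/selinux
+ * mount point (the class value 4 below is illustrative only): write
+ * the request to the node, then read the reply back on the same file
+ * descriptor, e.g.
+ *
+ *	fd = open("/sys/fs/selinux/access", O_RDWR);
+ *	write(fd, "scon tcon 4", 11);
+ *	read(fd, reply, sizeof(reply));
+ *
+ * simple_transaction_get()/simple_transaction_set() keep the reply in
+ * the per-file transaction buffer between the two calls.
+ */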
+
+static const struct file_operations transaction_ops = {
+	.write		= selinux_transaction_write,
+	.read		= simple_transaction_read,
+	.release	= simple_transaction_release,
+	.llseek		= generic_file_llseek,
+};
+
+/*
+ * payload - write methods
+ * If the method has a response, the response should be put in buf,
+ * and the length returned.  Otherwise return 0 or a negative error.
+ */
+
+static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
+{
+	char *scon = NULL, *tcon = NULL;
+	u32 ssid, tsid;
+	u16 tclass;
+	struct av_decision avd;
+	ssize_t length;
+
+	length = task_has_security(current, SECURITY__COMPUTE_AV);
+	if (length)
+		goto out;
+
+	length = -ENOMEM;
+	scon = kzalloc(size + 1, GFP_KERNEL);
+	if (!scon)
+		goto out;
+
+	length = -ENOMEM;
+	tcon = kzalloc(size + 1, GFP_KERNEL);
+	if (!tcon)
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
+		goto out;
+
+	length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	security_compute_av_user(ssid, tsid, tclass, &avd);
+
+	length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
+			  "%x %x %x %x %u %x",
+			  avd.allowed, 0xffffffff,
+			  avd.auditallow, avd.auditdeny,
+			  avd.seqno, avd.flags);
+out:
+	kfree(tcon);
+	kfree(scon);
+	return length;
+}
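+
+/*
+ * The reply placed in buf above is a single line of the form
+ *
+ *	allowed 0xffffffff auditallow auditdeny seqno flags
+ *
+ * with every field in hex except the decimal seqno; the literal
+ * 0xffffffff stands in for the legacy "decided" vector, i.e. all bits
+ * were computed.
+ */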
+
+static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
+{
+	char *scon = NULL, *tcon = NULL;
+	char *namebuf = NULL, *objname = NULL;
+	u32 ssid, tsid, newsid;
+	u16 tclass;
+	ssize_t length;
+	char *newcon = NULL;
+	u32 len;
+	int nargs;
+
+	length = task_has_security(current, SECURITY__COMPUTE_CREATE);
+	if (length)
+		goto out;
+
+	length = -ENOMEM;
+	scon = kzalloc(size + 1, GFP_KERNEL);
+	if (!scon)
+		goto out;
+
+	length = -ENOMEM;
+	tcon = kzalloc(size + 1, GFP_KERNEL);
+	if (!tcon)
+		goto out;
+
+	length = -ENOMEM;
+	namebuf = kzalloc(size + 1, GFP_KERNEL);
+	if (!namebuf)
+		goto out;
+
+	length = -EINVAL;
+	nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf);
+	if (nargs < 3 || nargs > 4)
+		goto out;
+	if (nargs == 4) {
+		/*
+		 * If the name of the new object to be queried contains
+		 * whitespace or multibyte characters, they must be encoded
+		 * using the percent-encoding rule.
+		 * If not encoded, the sscanf logic above picks up only the
+		 * left half of the supplied name, unexpectedly split at the
+		 * whitespace.
+		 */
+		char   *r, *w;
+		int     c1, c2;
+
+		r = w = namebuf;
+		do {
+			c1 = *r++;
+			if (c1 == '+')
+				c1 = ' ';
+			else if (c1 == '%') {
+				c1 = hex_to_bin(*r++);
+				if (c1 < 0)
+					goto out;
+				c2 = hex_to_bin(*r++);
+				if (c2 < 0)
+					goto out;
+				c1 = (c1 << 4) | c2;
+			}
+			*w++ = c1;
+		} while (c1 != '\0');
+
+		objname = namebuf;
+	}
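+
+	/*
+	 * Decoding example: for a request of "scon tcon 4 resolv%2econf"
+	 * (the class value 4 is illustrative), the loop above rewrites
+	 * namebuf from "resolv%2econf" to "resolv.conf"; a '+' would
+	 * decode to a space.
+	 */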
+
+	length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_transition_sid_user(ssid, tsid, tclass,
+					      objname, &newsid);
+	if (length)
+		goto out;
+
+	length = security_sid_to_context(newsid, &newcon, &len);
+	if (length)
+		goto out;
+
+	length = -ERANGE;
+	if (len > SIMPLE_TRANSACTION_LIMIT) {
+		printk(KERN_ERR "SELinux: %s:  context size (%u) exceeds "
+			"payload max\n", __func__, len);
+		goto out;
+	}
+
+	memcpy(buf, newcon, len);
+	length = len;
+out:
+	kfree(newcon);
+	kfree(namebuf);
+	kfree(tcon);
+	kfree(scon);
+	return length;
+}
+
+static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
+{
+	char *scon = NULL, *tcon = NULL;
+	u32 ssid, tsid, newsid;
+	u16 tclass;
+	ssize_t length;
+	char *newcon = NULL;
+	u32 len;
+
+	length = task_has_security(current, SECURITY__COMPUTE_RELABEL);
+	if (length)
+		goto out;
+
+	length = -ENOMEM;
+	scon = kzalloc(size + 1, GFP_KERNEL);
+	if (!scon)
+		goto out;
+
+	length = -ENOMEM;
+	tcon = kzalloc(size + 1, GFP_KERNEL);
+	if (!tcon)
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
+		goto out;
+
+	length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_change_sid(ssid, tsid, tclass, &newsid);
+	if (length)
+		goto out;
+
+	length = security_sid_to_context(newsid, &newcon, &len);
+	if (length)
+		goto out;
+
+	length = -ERANGE;
+	if (len > SIMPLE_TRANSACTION_LIMIT)
+		goto out;
+
+	memcpy(buf, newcon, len);
+	length = len;
+out:
+	kfree(newcon);
+	kfree(tcon);
+	kfree(scon);
+	return length;
+}
+
+static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
+{
+	char *con = NULL, *user = NULL, *ptr;
+	u32 sid, *sids = NULL;
+	ssize_t length;
+	char *newcon;
+	int i, rc;
+	u32 len, nsids;
+
+	length = task_has_security(current, SECURITY__COMPUTE_USER);
+	if (length)
+		goto out;
+
+	length = -ENOMEM;
+	con = kzalloc(size + 1, GFP_KERNEL);
+	if (!con)
+		goto out;
+
+	length = -ENOMEM;
+	user = kzalloc(size + 1, GFP_KERNEL);
+	if (!user)
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(buf, "%s %s", con, user) != 2)
+		goto out;
+
+	length = security_context_str_to_sid(con, &sid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_get_user_sids(sid, user, &sids, &nsids);
+	if (length)
+		goto out;
+
+	length = sprintf(buf, "%u", nsids) + 1;
+	ptr = buf + length;
+	for (i = 0; i < nsids; i++) {
+		rc = security_sid_to_context(sids[i], &newcon, &len);
+		if (rc) {
+			length = rc;
+			goto out;
+		}
+		if ((length + len) >= SIMPLE_TRANSACTION_LIMIT) {
+			kfree(newcon);
+			length = -ERANGE;
+			goto out;
+		}
+		memcpy(ptr, newcon, len);
+		kfree(newcon);
+		ptr += len;
+		length += len;
+	}
+out:
+	kfree(sids);
+	kfree(user);
+	kfree(con);
+	return length;
+}
+
+static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
+{
+	char *scon = NULL, *tcon = NULL;
+	u32 ssid, tsid, newsid;
+	u16 tclass;
+	ssize_t length;
+	char *newcon = NULL;
+	u32 len;
+
+	length = task_has_security(current, SECURITY__COMPUTE_MEMBER);
+	if (length)
+		goto out;
+
+	length = -ENOMEM;
+	scon = kzalloc(size + 1, GFP_KERNEL);
+	if (!scon)
+		goto out;
+
+	length = -ENOMEM;
+	tcon = kzalloc(size + 1, GFP_KERNEL);
+	if (!tcon)
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
+		goto out;
+
+	length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
+	if (length)
+		goto out;
+
+	length = security_member_sid(ssid, tsid, tclass, &newsid);
+	if (length)
+		goto out;
+
+	length = security_sid_to_context(newsid, &newcon, &len);
+	if (length)
+		goto out;
+
+	length = -ERANGE;
+	if (len > SIMPLE_TRANSACTION_LIMIT) {
+		printk(KERN_ERR "SELinux: %s:  context size (%u) exceeds "
+			"payload max\n", __func__, len);
+		goto out;
+	}
+
+	memcpy(buf, newcon, len);
+	length = len;
+out:
+	kfree(newcon);
+	kfree(tcon);
+	kfree(scon);
+	return length;
+}
+
+static struct inode *sel_make_inode(struct super_block *sb, int mode)
+{
+	struct inode *ret = new_inode(sb);
+
+	if (ret) {
+		ret->i_mode = mode;
+		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+	}
+	return ret;
+}
+
+static ssize_t sel_read_bool(struct file *filep, char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	char *page = NULL;
+	ssize_t length;
+	ssize_t ret;
+	int cur_enforcing;
+	unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
+	const char *name = filep->f_path.dentry->d_name.name;
+
+	mutex_lock(&sel_mutex);
+
+	ret = -EINVAL;
+	if (index >= bool_num || strcmp(name, bool_pending_names[index]))
+		goto out;
+
+	ret = -ENOMEM;
+	page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		goto out;
+
+	cur_enforcing = security_get_bool_value(index);
+	if (cur_enforcing < 0) {
+		ret = cur_enforcing;
+		goto out;
+	}
+	length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
+			  bool_pending_values[index]);
+	ret = simple_read_from_buffer(buf, count, ppos, page, length);
+out:
+	mutex_unlock(&sel_mutex);
+	free_page((unsigned long)page);
+	return ret;
+}
+
+static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	char *page = NULL;
+	ssize_t length;
+	int new_value;
+	unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
+	const char *name = filep->f_path.dentry->d_name.name;
+
+	mutex_lock(&sel_mutex);
+
+	length = task_has_security(current, SECURITY__SETBOOL);
+	if (length)
+		goto out;
+
+	length = -EINVAL;
+	if (index >= bool_num || strcmp(name, bool_pending_names[index]))
+		goto out;
+
+	length = -ENOMEM;
+	if (count >= PAGE_SIZE)
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
+	page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		goto out;
+
+	length = -EFAULT;
+	if (copy_from_user(page, buf, count))
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(page, "%d", &new_value) != 1)
+		goto out;
+
+	if (new_value)
+		new_value = 1;
+
+	bool_pending_values[index] = new_value;
+	length = count;
+
+out:
+	mutex_unlock(&sel_mutex);
+	free_page((unsigned long) page);
+	return length;
+}
+
+static const struct file_operations sel_bool_ops = {
+	.read		= sel_read_bool,
+	.write		= sel_write_bool,
+	.llseek		= generic_file_llseek,
+};
+
+static ssize_t sel_commit_bools_write(struct file *filep,
+				      const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	char *page = NULL;
+	ssize_t length;
+	int new_value;
+
+	mutex_lock(&sel_mutex);
+
+	length = task_has_security(current, SECURITY__SETBOOL);
+	if (length)
+		goto out;
+
+	length = -ENOMEM;
+	if (count >= PAGE_SIZE)
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
+	page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		goto out;
+
+	length = -EFAULT;
+	if (copy_from_user(page, buf, count))
+		goto out;
+
+	length = -EINVAL;
+	if (sscanf(page, "%d", &new_value) != 1)
+		goto out;
+
+	length = 0;
+	if (new_value && bool_pending_values)
+		length = security_set_bools(bool_num, bool_pending_values);
+
+	if (!length)
+		length = count;
+
+out:
+	mutex_unlock(&sel_mutex);
+	free_page((unsigned long) page);
+	return length;
+}
+
+static const struct file_operations sel_commit_bools_ops = {
+	.write		= sel_commit_bools_write,
+	.llseek		= generic_file_llseek,
+};
+
+static void sel_remove_entries(struct dentry *de)
+{
+	d_genocide(de);
+	shrink_dcache_parent(de);
+}
+
+#define BOOL_DIR_NAME "booleans"
+
+static int sel_make_bools(void)
+{
+	int i, ret;
+	ssize_t len;
+	struct dentry *dentry = NULL;
+	struct dentry *dir = bool_dir;
+	struct inode *inode = NULL;
+	struct inode_security_struct *isec;
+	char **names = NULL, *page;
+	int num;
+	int *values = NULL;
+	u32 sid;
+
+	/* remove any existing files */
+	for (i = 0; i < bool_num; i++)
+		kfree(bool_pending_names[i]);
+	kfree(bool_pending_names);
+	kfree(bool_pending_values);
+	bool_num = 0;
+	bool_pending_names = NULL;
+	bool_pending_values = NULL;
+
+	sel_remove_entries(dir);
+
+	ret = -ENOMEM;
+	page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		goto out;
+
+	ret = security_get_bools(&num, &names, &values);
+	if (ret)
+		goto out;
+
+	for (i = 0; i < num; i++) {
+		ret = -ENOMEM;
+		dentry = d_alloc_name(dir, names[i]);
+		if (!dentry)
+			goto out;
+
+		ret = -ENOMEM;
+		inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
+		if (!inode)
+			goto out;
+
+		ret = -ENAMETOOLONG;
+		len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
+		if (len >= PAGE_SIZE)
+			goto out;
+
+		isec = (struct inode_security_struct *)inode->i_security;
+		ret = security_genfs_sid("selinuxfs", page, SECCLASS_FILE, &sid);
+		if (ret)
+			goto out;
+
+		isec->sid = sid;
+		isec->initialized = 1;
+		inode->i_fop = &sel_bool_ops;
+		inode->i_ino = i|SEL_BOOL_INO_OFFSET;
+		d_add(dentry, inode);
+	}
+	bool_num = num;
+	bool_pending_names = names;
+	bool_pending_values = values;
+
+	free_page((unsigned long)page);
+	return 0;
+out:
+	free_page((unsigned long)page);
+
+	if (names) {
+		for (i = 0; i < num; i++)
+			kfree(names[i]);
+		kfree(names);
+	}
+	kfree(values);
+	sel_remove_entries(dir);
+
+	return ret;
+}
+
+#define NULL_FILE_NAME "null"
+
+struct path selinux_null;
+
+static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf,
+					    size_t count, loff_t *ppos)
+{
+	char tmpbuf[TMPBUFLEN];
+	ssize_t length;
+
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%u", avc_cache_threshold);
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static ssize_t sel_write_avc_cache_threshold(struct file *file,
+					     const char __user *buf,
+					     size_t count, loff_t *ppos)
+
+{
+	char *page = NULL;
+	ssize_t ret;
+	int new_value;
+
+	ret = task_has_security(current, SECURITY__SETSECPARAM);
+	if (ret)
+		goto out;
+
+	ret = -ENOMEM;
+	if (count >= PAGE_SIZE)
+		goto out;
+
+	/* No partial writes. */
+	ret = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	ret = -ENOMEM;
+	page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		goto out;
+
+	ret = -EFAULT;
+	if (copy_from_user(page, buf, count))
+		goto out;
+
+	ret = -EINVAL;
+	if (sscanf(page, "%u", &new_value) != 1)
+		goto out;
+
+	avc_cache_threshold = new_value;
+
+	ret = count;
+out:
+	free_page((unsigned long)page);
+	return ret;
+}
+
+static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf,
+				       size_t count, loff_t *ppos)
+{
+	char *page;
+	ssize_t length;
+
+	page = (char *)__get_free_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	length = avc_get_hash_stats(page);
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos, page, length);
+	free_page((unsigned long)page);
+
+	return length;
+}
+
+static const struct file_operations sel_avc_cache_threshold_ops = {
+	.read		= sel_read_avc_cache_threshold,
+	.write		= sel_write_avc_cache_threshold,
+	.llseek		= generic_file_llseek,
+};
+
+static const struct file_operations sel_avc_hash_stats_ops = {
+	.read		= sel_read_avc_hash_stats,
+	.llseek		= generic_file_llseek,
+};
+
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
+{
+	int cpu;
+
+	for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*idx = cpu + 1;
+		return &per_cpu(avc_cache_stats, cpu);
+	}
+	return NULL;
+}
+
+static void *sel_avc_stats_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	loff_t n = *pos - 1;
+
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	return sel_avc_get_stat_idx(&n);
+}
+
+static void *sel_avc_stats_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return sel_avc_get_stat_idx(pos);
+}
+
+static int sel_avc_stats_seq_show(struct seq_file *seq, void *v)
+{
+	struct avc_cache_stats *st = v;
+
+	if (v == SEQ_START_TOKEN)
+		seq_printf(seq, "lookups hits misses allocations reclaims "
+			   "frees\n");
+	else {
+		unsigned int lookups = st->lookups;
+		unsigned int misses = st->misses;
+		unsigned int hits = lookups - misses;
+		seq_printf(seq, "%u %u %u %u %u %u\n", lookups,
+			   hits, misses, st->allocations,
+			   st->reclaims, st->frees);
+	}
+	return 0;
+}
+
+static void sel_avc_stats_seq_stop(struct seq_file *seq, void *v)
+{ }
+
+static const struct seq_operations sel_avc_cache_stats_seq_ops = {
+	.start		= sel_avc_stats_seq_start,
+	.next		= sel_avc_stats_seq_next,
+	.show		= sel_avc_stats_seq_show,
+	.stop		= sel_avc_stats_seq_stop,
+};
+
+static int sel_open_avc_cache_stats(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &sel_avc_cache_stats_seq_ops);
+}
+
+static const struct file_operations sel_avc_cache_stats_ops = {
+	.open		= sel_open_avc_cache_stats,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+#endif
+
+static int sel_make_avc_files(struct dentry *dir)
+{
+	int i;
+	static struct tree_descr files[] = {
+		{ "cache_threshold",
+		  &sel_avc_cache_threshold_ops, S_IRUGO|S_IWUSR },
+		{ "hash_stats", &sel_avc_hash_stats_ops, S_IRUGO },
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+		{ "cache_stats", &sel_avc_cache_stats_ops, S_IRUGO },
+#endif
+	};
+
+	for (i = 0; i < ARRAY_SIZE(files); i++) {
+		struct inode *inode;
+		struct dentry *dentry;
+
+		dentry = d_alloc_name(dir, files[i].name);
+		if (!dentry)
+			return -ENOMEM;
+
+		inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
+		if (!inode)
+			return -ENOMEM;
+
+		inode->i_fop = files[i].ops;
+		inode->i_ino = ++sel_last_ino;
+		d_add(dentry, inode);
+	}
+
+	return 0;
+}
+
+static ssize_t sel_read_initcon(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char *con;
+	u32 sid, len;
+	ssize_t ret;
+
+	sid = file_inode(file)->i_ino&SEL_INO_MASK;
+	ret = security_sid_to_context(sid, &con, &len);
+	if (ret)
+		return ret;
+
+	ret = simple_read_from_buffer(buf, count, ppos, con, len);
+	kfree(con);
+	return ret;
+}
+
+static const struct file_operations sel_initcon_ops = {
+	.read		= sel_read_initcon,
+	.llseek		= generic_file_llseek,
+};
+
+static int sel_make_initcon_files(struct dentry *dir)
+{
+	int i;
+
+	for (i = 1; i <= SECINITSID_NUM; i++) {
+		struct inode *inode;
+		struct dentry *dentry;
+		dentry = d_alloc_name(dir, security_get_initial_sid_context(i));
+		if (!dentry)
+			return -ENOMEM;
+
+		inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+		if (!inode)
+			return -ENOMEM;
+
+		inode->i_fop = &sel_initcon_ops;
+		inode->i_ino = i|SEL_INITCON_INO_OFFSET;
+		d_add(dentry, inode);
+	}
+
+	return 0;
+}
+
+static inline unsigned long sel_class_to_ino(u16 class)
+{
+	return (class * (SEL_VEC_MAX + 1)) | SEL_CLASS_INO_OFFSET;
+}
+
+static inline u16 sel_ino_to_class(unsigned long ino)
+{
+	return (ino & SEL_INO_MASK) / (SEL_VEC_MAX + 1);
+}
+
+static inline unsigned long sel_perm_to_ino(u16 class, u32 perm)
+{
+	return (class * (SEL_VEC_MAX + 1) + perm) | SEL_CLASS_INO_OFFSET;
+}
+
+static inline u32 sel_ino_to_perm(unsigned long ino)
+{
+	return (ino & SEL_INO_MASK) % (SEL_VEC_MAX + 1);
+}
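+
+/*
+ * Worked example of the encoding above: with SEL_VEC_MAX + 1 slots
+ * reserved per class, class 2 / permission 3 maps to inode
+ * (2 * (SEL_VEC_MAX + 1) + 3) | SEL_CLASS_INO_OFFSET, and the two
+ * helpers recover 2 and 3 again by division and remainder on the
+ * masked low bits.
+ */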
+
+static ssize_t sel_read_class(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ino = file_inode(file)->i_ino;
+	char res[TMPBUFLEN];
+	ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+	return simple_read_from_buffer(buf, count, ppos, res, len);
+}
+
+static const struct file_operations sel_class_ops = {
+	.read		= sel_read_class,
+	.llseek		= generic_file_llseek,
+};
+
+static ssize_t sel_read_perm(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ino = file_inode(file)->i_ino;
+	char res[TMPBUFLEN];
+	ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+	return simple_read_from_buffer(buf, count, ppos, res, len);
+}
+
+static const struct file_operations sel_perm_ops = {
+	.read		= sel_read_perm,
+	.llseek		= generic_file_llseek,
+};
+
+static ssize_t sel_read_policycap(struct file *file, char __user *buf,
+				  size_t count, loff_t *ppos)
+{
+	int value;
+	char tmpbuf[TMPBUFLEN];
+	ssize_t length;
+	unsigned long i_ino = file_inode(file)->i_ino;
+
+	value = security_policycap_supported(i_ino & SEL_INO_MASK);
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value);
+
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_policycap_ops = {
+	.read		= sel_read_policycap,
+	.llseek		= generic_file_llseek,
+};
+
+static int sel_make_perm_files(char *objclass, int classvalue,
+				struct dentry *dir)
+{
+	int i, rc, nperms;
+	char **perms;
+
+	rc = security_get_permissions(objclass, &perms, &nperms);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < nperms; i++) {
+		struct inode *inode;
+		struct dentry *dentry;
+
+		rc = -ENOMEM;
+		dentry = d_alloc_name(dir, perms[i]);
+		if (!dentry)
+			goto out;
+
+		rc = -ENOMEM;
+		inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+		if (!inode)
+			goto out;
+
+		inode->i_fop = &sel_perm_ops;
+		/* i+1 since perm values are 1-indexed */
+		inode->i_ino = sel_perm_to_ino(classvalue, i + 1);
+		d_add(dentry, inode);
+	}
+	rc = 0;
+out:
+	for (i = 0; i < nperms; i++)
+		kfree(perms[i]);
+	kfree(perms);
+	return rc;
+}
+
+static int sel_make_class_dir_entries(char *classname, int index,
+					struct dentry *dir)
+{
+	struct dentry *dentry = NULL;
+	struct inode *inode = NULL;
+	int rc;
+
+	dentry = d_alloc_name(dir, "index");
+	if (!dentry)
+		return -ENOMEM;
+
+	inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+	if (!inode)
+		return -ENOMEM;
+
+	inode->i_fop = &sel_class_ops;
+	inode->i_ino = sel_class_to_ino(index);
+	d_add(dentry, inode);
+
+	dentry = sel_make_dir(dir, "perms", &last_class_ino);
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+
+	rc = sel_make_perm_files(classname, index, dentry);
+
+	return rc;
+}
+
+static int sel_make_classes(void)
+{
+	int rc, nclasses, i;
+	char **classes;
+
+	/* delete any existing entries */
+	sel_remove_entries(class_dir);
+
+	rc = security_get_classes(&classes, &nclasses);
+	if (rc)
+		return rc;
+
+	/* +2 since classes are 1-indexed */
+	last_class_ino = sel_class_to_ino(nclasses + 2);
+
+	for (i = 0; i < nclasses; i++) {
+		struct dentry *class_name_dir;
+
+		class_name_dir = sel_make_dir(class_dir, classes[i],
+				&last_class_ino);
+		if (IS_ERR(class_name_dir)) {
+			rc = PTR_ERR(class_name_dir);
+			goto out;
+		}
+
+		/* i+1 since class values are 1-indexed */
+		rc = sel_make_class_dir_entries(classes[i], i + 1,
+				class_name_dir);
+		if (rc)
+			goto out;
+	}
+	rc = 0;
+out:
+	for (i = 0; i < nclasses; i++)
+		kfree(classes[i]);
+	kfree(classes);
+	return rc;
+}
+
+static int sel_make_policycap(void)
+{
+	unsigned int iter;
+	struct dentry *dentry = NULL;
+	struct inode *inode = NULL;
+
+	sel_remove_entries(policycap_dir);
+
+	for (iter = 0; iter <= POLICYDB_CAPABILITY_MAX; iter++) {
+		if (iter < ARRAY_SIZE(policycap_names))
+			dentry = d_alloc_name(policycap_dir,
+					      policycap_names[iter]);
+		else
+			dentry = d_alloc_name(policycap_dir, "unknown");
+
+		if (dentry == NULL)
+			return -ENOMEM;
+
+		inode = sel_make_inode(policycap_dir->d_sb, S_IFREG | S_IRUGO);
+		if (inode == NULL)
+			return -ENOMEM;
+
+		inode->i_fop = &sel_policycap_ops;
+		inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
+		d_add(dentry, inode);
+	}
+
+	return 0;
+}
+
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
+			unsigned long *ino)
+{
+	struct dentry *dentry = d_alloc_name(dir, name);
+	struct inode *inode;
+
+	if (!dentry)
+		return ERR_PTR(-ENOMEM);
+
+	inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO);
+	if (!inode) {
+		dput(dentry);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	inode->i_op = &simple_dir_inode_operations;
+	inode->i_fop = &simple_dir_operations;
+	inode->i_ino = ++(*ino);
+	/* directory inodes start off with i_nlink == 2 (for "." entry) */
+	inc_nlink(inode);
+	d_add(dentry, inode);
+	/* bump link count on parent directory, too */
+	inc_nlink(d_inode(dir));
+
+	return dentry;
+}
+
+static int sel_fill_super(struct super_block *sb, void *data, int silent)
+{
+	int ret;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct inode_security_struct *isec;
+
+	static struct tree_descr selinux_files[] = {
+		[SEL_LOAD] = {"load", &sel_load_ops, S_IRUSR|S_IWUSR},
+		[SEL_ENFORCE] = {"enforce", &sel_enforce_ops, S_IRUGO|S_IWUSR},
+		[SEL_CONTEXT] = {"context", &transaction_ops, S_IRUGO|S_IWUGO},
+		[SEL_ACCESS] = {"access", &transaction_ops, S_IRUGO|S_IWUGO},
+		[SEL_CREATE] = {"create", &transaction_ops, S_IRUGO|S_IWUGO},
+		[SEL_RELABEL] = {"relabel", &transaction_ops, S_IRUGO|S_IWUGO},
+		[SEL_USER] = {"user", &transaction_ops, S_IRUGO|S_IWUGO},
+		[SEL_POLICYVERS] = {"policyvers", &sel_policyvers_ops, S_IRUGO},
+		[SEL_COMMIT_BOOLS] = {"commit_pending_bools", &sel_commit_bools_ops, S_IWUSR},
+		[SEL_MLS] = {"mls", &sel_mls_ops, S_IRUGO},
+		[SEL_DISABLE] = {"disable", &sel_disable_ops, S_IWUSR},
+		[SEL_MEMBER] = {"member", &transaction_ops, S_IRUGO|S_IWUGO},
+		[SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
+		[SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
+		[SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
+		[SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
+		[SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO},
+		/* last one */ {""}
+	};
+	ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
+	if (ret)
+		goto err;
+
+	bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino);
+	if (IS_ERR(bool_dir)) {
+		ret = PTR_ERR(bool_dir);
+		bool_dir = NULL;
+		goto err;
+	}
+
+	ret = -ENOMEM;
+	dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
+	if (!dentry)
+		goto err;
+
+	ret = -ENOMEM;
+	inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO);
+	if (!inode)
+		goto err;
+
+	inode->i_ino = ++sel_last_ino;
+	isec = (struct inode_security_struct *)inode->i_security;
+	isec->sid = SECINITSID_DEVNULL;
+	isec->sclass = SECCLASS_CHR_FILE;
+	isec->initialized = 1;
+
+	init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3));
+	d_add(dentry, inode);
+	selinux_null.dentry = dentry;
+
+	dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
+	if (IS_ERR(dentry)) {
+		ret = PTR_ERR(dentry);
+		goto err;
+	}
+
+	ret = sel_make_avc_files(dentry);
+	if (ret)
+		goto err;
+
+	dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino);
+	if (IS_ERR(dentry)) {
+		ret = PTR_ERR(dentry);
+		goto err;
+	}
+
+	ret = sel_make_initcon_files(dentry);
+	if (ret)
+		goto err;
+
+	class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino);
+	if (IS_ERR(class_dir)) {
+		ret = PTR_ERR(class_dir);
+		class_dir = NULL;
+		goto err;
+	}
+
+	policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino);
+	if (IS_ERR(policycap_dir)) {
+		ret = PTR_ERR(policycap_dir);
+		policycap_dir = NULL;
+		goto err;
+	}
+	return 0;
+err:
+	printk(KERN_ERR "SELinux: %s:  failed while creating inodes\n",
+		__func__);
+	return ret;
+}
+
+static struct dentry *sel_mount(struct file_system_type *fs_type,
+		      int flags, const char *dev_name, void *data)
+{
+	return mount_single(fs_type, flags, data, sel_fill_super);
+}
+
+static struct file_system_type sel_fs_type = {
+	.name		= "selinuxfs",
+	.mount		= sel_mount,
+	.kill_sb	= kill_litter_super,
+};
+
+struct vfsmount *selinuxfs_mount;
+
+static int __init init_sel_fs(void)
+{
+	int err;
+
+	if (!selinux_enabled)
+		return 0;
+
+	err = sysfs_create_mount_point(fs_kobj, "selinux");
+	if (err)
+		return err;
+
+	err = register_filesystem(&sel_fs_type);
+	if (err) {
+		sysfs_remove_mount_point(fs_kobj, "selinux");
+		return err;
+	}
+
+	selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type);
+	if (IS_ERR(selinuxfs_mount)) {
+		printk(KERN_ERR "selinuxfs:  could not mount!\n");
+		err = PTR_ERR(selinuxfs_mount);
+		selinuxfs_mount = NULL;
+	}
+
+	return err;
+}
+
+__initcall(init_sel_fs);
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+void exit_sel_fs(void)
+{
+	sysfs_remove_mount_point(fs_kobj, "selinux");
+	kern_unmount(selinuxfs_mount);
+	unregister_filesystem(&sel_fs_type);
+}
+#endif
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
new file mode 100644
index 0000000..3628d3a
--- /dev/null
+++ b/security/selinux/ss/avtab.c
@@ -0,0 +1,672 @@
+/*
+ * Implementation of the access vector table type.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+
+/* Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ *	Added conditional policy language extensions
+ *
+ * Copyright (C) 2003 Tresys Technology, LLC
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ *
+ * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
+ *	Tuned number of hash slots for avtab to reduce memory usage
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include "avtab.h"
+#include "policydb.h"
+
+static struct kmem_cache *avtab_node_cachep;
+static struct kmem_cache *avtab_xperms_cachep;
+
+/* Based on MurmurHash3, written by Austin Appleby and placed in the
+ * public domain.
+ */
+static inline int avtab_hash(struct avtab_key *keyp, u32 mask)
+{
+	static const u32 c1 = 0xcc9e2d51;
+	static const u32 c2 = 0x1b873593;
+	static const u32 r1 = 15;
+	static const u32 r2 = 13;
+	static const u32 m  = 5;
+	static const u32 n  = 0xe6546b64;
+
+	u32 hash = 0;
+
+#define mix(input) { \
+	u32 v = input; \
+	v *= c1; \
+	v = (v << r1) | (v >> (32 - r1)); \
+	v *= c2; \
+	hash ^= v; \
+	hash = (hash << r2) | (hash >> (32 - r2)); \
+	hash = hash * m + n; \
+}
+
+	mix(keyp->target_class);
+	mix(keyp->target_type);
+	mix(keyp->source_type);
+
+#undef mix
+
+	hash ^= hash >> 16;
+	hash *= 0x85ebca6b;
+	hash ^= hash >> 13;
+	hash *= 0xc2b2ae35;
+	hash ^= hash >> 16;
+
+	return hash & mask;
+}
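+
+/*
+ * Usage sketch (illustrative values only): the mask must be one less
+ * than a power-of-two slot count, so the result is a valid bucket
+ * index:
+ *
+ *	struct avtab_key k = {
+ *		.source_type = 10, .target_type = 20, .target_class = 2,
+ *	};
+ *	int bucket = avtab_hash(&k, h->mask);
+ *
+ * yields a bucket in [0, h->mask].
+ */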
+
+static struct avtab_node*
+avtab_insert_node(struct avtab *h, int hvalue,
+		  struct avtab_node *prev, struct avtab_node *cur,
+		  struct avtab_key *key, struct avtab_datum *datum)
+{
+	struct avtab_node *newnode;
+	struct avtab_extended_perms *xperms;
+	newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
+	if (newnode == NULL)
+		return NULL;
+	newnode->key = *key;
+
+	if (key->specified & AVTAB_XPERMS) {
+		xperms = kmem_cache_zalloc(avtab_xperms_cachep, GFP_KERNEL);
+		if (xperms == NULL) {
+			kmem_cache_free(avtab_node_cachep, newnode);
+			return NULL;
+		}
+		*xperms = *(datum->u.xperms);
+		newnode->datum.u.xperms = xperms;
+	} else {
+		newnode->datum.u.data = datum->u.data;
+	}
+
+	if (prev) {
+		newnode->next = prev->next;
+		prev->next = newnode;
+	} else {
+		newnode->next = flex_array_get_ptr(h->htable, hvalue);
+		if (flex_array_put_ptr(h->htable, hvalue, newnode,
+				       GFP_KERNEL|__GFP_ZERO)) {
+			kmem_cache_free(avtab_node_cachep, newnode);
+			return NULL;
+		}
+	}
+
+	h->nel++;
+	return newnode;
+}
+
+static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum)
+{
+	int hvalue;
+	struct avtab_node *prev, *cur, *newnode;
+	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+	if (!h || !h->htable)
+		return -EINVAL;
+
+	hvalue = avtab_hash(key, h->mask);
+	for (prev = NULL, cur = flex_array_get_ptr(h->htable, hvalue);
+	     cur;
+	     prev = cur, cur = cur->next) {
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type == cur->key.target_type &&
+		    key->target_class == cur->key.target_class &&
+		    (specified & cur->key.specified)) {
+			/* extended perms may not be unique */
+			if (specified & AVTAB_XPERMS)
+				break;
+			return -EEXIST;
+		}
+		if (key->source_type < cur->key.source_type)
+			break;
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type < cur->key.target_type)
+			break;
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type == cur->key.target_type &&
+		    key->target_class < cur->key.target_class)
+			break;
+	}
+
+	newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum);
+	if (!newnode)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Unlike avtab_insert(), this function allows multiple insertions of the same
+ * key/specified mask into the table, as needed by the conditional avtab.
+ * It also returns a pointer to the node inserted.
+ */
+struct avtab_node *
+avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum)
+{
+	int hvalue;
+	struct avtab_node *prev, *cur;
+	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+	if (!h || !h->htable)
+		return NULL;
+	hvalue = avtab_hash(key, h->mask);
+	for (prev = NULL, cur = flex_array_get_ptr(h->htable, hvalue);
+	     cur;
+	     prev = cur, cur = cur->next) {
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type == cur->key.target_type &&
+		    key->target_class == cur->key.target_class &&
+		    (specified & cur->key.specified))
+			break;
+		if (key->source_type < cur->key.source_type)
+			break;
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type < cur->key.target_type)
+			break;
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type == cur->key.target_type &&
+		    key->target_class < cur->key.target_class)
+			break;
+	}
+	return avtab_insert_node(h, hvalue, prev, cur, key, datum);
+}
+
+struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
+{
+	int hvalue;
+	struct avtab_node *cur;
+	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+	if (!h || !h->htable)
+		return NULL;
+
+	hvalue = avtab_hash(key, h->mask);
+	for (cur = flex_array_get_ptr(h->htable, hvalue); cur;
+	     cur = cur->next) {
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type == cur->key.target_type &&
+		    key->target_class == cur->key.target_class &&
+		    (specified & cur->key.specified))
+			return &cur->datum;
+
+		if (key->source_type < cur->key.source_type)
+			break;
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type < cur->key.target_type)
+			break;
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type == cur->key.target_type &&
+		    key->target_class < cur->key.target_class)
+			break;
+	}
+
+	return NULL;
+}
+
+/* This search function returns a node pointer, and can be used in
+ * conjunction with avtab_search_next_node()
+ */
+struct avtab_node*
+avtab_search_node(struct avtab *h, struct avtab_key *key)
+{
+	int hvalue;
+	struct avtab_node *cur;
+	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+	if (!h || !h->htable)
+		return NULL;
+
+	hvalue = avtab_hash(key, h->mask);
+	for (cur = flex_array_get_ptr(h->htable, hvalue); cur;
+	     cur = cur->next) {
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type == cur->key.target_type &&
+		    key->target_class == cur->key.target_class &&
+		    (specified & cur->key.specified))
+			return cur;
+
+		if (key->source_type < cur->key.source_type)
+			break;
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type < cur->key.target_type)
+			break;
+		if (key->source_type == cur->key.source_type &&
+		    key->target_type == cur->key.target_type &&
+		    key->target_class < cur->key.target_class)
+			break;
+	}
+	return NULL;
+}
+
+struct avtab_node*
+avtab_search_node_next(struct avtab_node *node, int specified)
+{
+	struct avtab_node *cur;
+
+	if (!node)
+		return NULL;
+
+	specified &= ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+	for (cur = node->next; cur; cur = cur->next) {
+		if (node->key.source_type == cur->key.source_type &&
+		    node->key.target_type == cur->key.target_type &&
+		    node->key.target_class == cur->key.target_class &&
+		    (specified & cur->key.specified))
+			return cur;
+
+		if (node->key.source_type < cur->key.source_type)
+			break;
+		if (node->key.source_type == cur->key.source_type &&
+		    node->key.target_type < cur->key.target_type)
+			break;
+		if (node->key.source_type == cur->key.source_type &&
+		    node->key.target_type == cur->key.target_type &&
+		    node->key.target_class < cur->key.target_class)
+			break;
+	}
+	return NULL;
+}
+
+void avtab_destroy(struct avtab *h)
+{
+	int i;
+	struct avtab_node *cur, *temp;
+
+	if (!h || !h->htable)
+		return;
+
+	for (i = 0; i < h->nslot; i++) {
+		cur = flex_array_get_ptr(h->htable, i);
+		while (cur) {
+			temp = cur;
+			cur = cur->next;
+			if (temp->key.specified & AVTAB_XPERMS)
+				kmem_cache_free(avtab_xperms_cachep,
+						temp->datum.u.xperms);
+			kmem_cache_free(avtab_node_cachep, temp);
+		}
+	}
+	flex_array_free(h->htable);
+	h->htable = NULL;
+	h->nslot = 0;
+	h->mask = 0;
+}
+
+int avtab_init(struct avtab *h)
+{
+	h->htable = NULL;
+	h->nel = 0;
+	return 0;
+}
+
+int avtab_alloc(struct avtab *h, u32 nrules)
+{
+	u32 mask = 0;
+	u32 shift = 0;
+	u32 work = nrules;
+	u32 nslot = 0;
+
+	if (nrules == 0)
+		goto avtab_alloc_out;
+
+	while (work) {
+		work  = work >> 1;
+		shift++;
+	}
+	if (shift > 2)
+		shift = shift - 2;
+	nslot = 1 << shift;
+	if (nslot > MAX_AVTAB_HASH_BUCKETS)
+		nslot = MAX_AVTAB_HASH_BUCKETS;
+	mask = nslot - 1;
+
+	h->htable = flex_array_alloc(sizeof(struct avtab_node *), nslot,
+				     GFP_KERNEL | __GFP_ZERO);
+	if (!h->htable)
+		return -ENOMEM;
+
+ avtab_alloc_out:
+	h->nel = 0;
+	h->nslot = nslot;
+	h->mask = mask;
+	printk(KERN_DEBUG "SELinux: %d avtab hash slots, %d rules.\n",
+	       h->nslot, nrules);
+	return 0;
+}
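+
+/*
+ * Worked example of the sizing above (illustrative figures): for
+ * nrules == 4096 the loop leaves shift == 13, which is reduced to 11,
+ * giving nslot == 2048 and mask == 0x7ff, i.e. about two rules per
+ * slot, capped at MAX_AVTAB_HASH_BUCKETS (65536 slots).
+ */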
+
+void avtab_hash_eval(struct avtab *h, char *tag)
+{
+	int i, chain_len, slots_used, max_chain_len;
+	unsigned long long chain2_len_sum;
+	struct avtab_node *cur;
+
+	slots_used = 0;
+	max_chain_len = 0;
+	chain2_len_sum = 0;
+	for (i = 0; i < h->nslot; i++) {
+		cur = flex_array_get_ptr(h->htable, i);
+		if (cur) {
+			slots_used++;
+			chain_len = 0;
+			while (cur) {
+				chain_len++;
+				cur = cur->next;
+			}
+
+			if (chain_len > max_chain_len)
+				max_chain_len = chain_len;
+			chain2_len_sum += chain_len * chain_len;
+		}
+	}
+
+	printk(KERN_DEBUG "SELinux: %s:  %d entries and %d/%d buckets used, "
+	       "longest chain length %d, sum of chain length^2 %llu\n",
+	       tag, h->nel, slots_used, h->nslot, max_chain_len,
+	       chain2_len_sum);
+}
+
+static uint16_t spec_order[] = {
+	AVTAB_ALLOWED,
+	AVTAB_AUDITDENY,
+	AVTAB_AUDITALLOW,
+	AVTAB_TRANSITION,
+	AVTAB_CHANGE,
+	AVTAB_MEMBER,
+	AVTAB_XPERMS_ALLOWED,
+	AVTAB_XPERMS_AUDITALLOW,
+	AVTAB_XPERMS_DONTAUDIT
+};
+
+int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
+		    int (*insertf)(struct avtab *a, struct avtab_key *k,
+				   struct avtab_datum *d, void *p),
+		    void *p)
+{
+	__le16 buf16[4];
+	u16 enabled;
+	u32 items, items2, val, vers = pol->policyvers;
+	struct avtab_key key;
+	struct avtab_datum datum;
+	struct avtab_extended_perms xperms;
+	__le32 buf32[ARRAY_SIZE(xperms.perms.p)];
+	int i, rc;
+	unsigned set;
+
+	memset(&key, 0, sizeof(struct avtab_key));
+	memset(&datum, 0, sizeof(struct avtab_datum));
+
+	if (vers < POLICYDB_VERSION_AVTAB) {
+		rc = next_entry(buf32, fp, sizeof(u32));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		items2 = le32_to_cpu(buf32[0]);
+		if (items2 > ARRAY_SIZE(buf32)) {
+			printk(KERN_ERR "SELinux: avtab: entry overflow\n");
+			return -EINVAL;
+		}
+		rc = next_entry(buf32, fp, sizeof(u32)*items2);
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		items = 0;
+
+		val = le32_to_cpu(buf32[items++]);
+		key.source_type = (u16)val;
+		if (key.source_type != val) {
+			printk(KERN_ERR "SELinux: avtab: truncated source type\n");
+			return -EINVAL;
+		}
+		val = le32_to_cpu(buf32[items++]);
+		key.target_type = (u16)val;
+		if (key.target_type != val) {
+			printk(KERN_ERR "SELinux: avtab: truncated target type\n");
+			return -EINVAL;
+		}
+		val = le32_to_cpu(buf32[items++]);
+		key.target_class = (u16)val;
+		if (key.target_class != val) {
+			printk(KERN_ERR "SELinux: avtab: truncated target class\n");
+			return -EINVAL;
+		}
+
+		val = le32_to_cpu(buf32[items++]);
+		enabled = (val & AVTAB_ENABLED_OLD) ? AVTAB_ENABLED : 0;
+
+		if (!(val & (AVTAB_AV | AVTAB_TYPE))) {
+			printk(KERN_ERR "SELinux: avtab: null entry\n");
+			return -EINVAL;
+		}
+		if ((val & AVTAB_AV) &&
+		    (val & AVTAB_TYPE)) {
+			printk(KERN_ERR "SELinux: avtab: entry has both access vectors and types\n");
+			return -EINVAL;
+		}
+		if (val & AVTAB_XPERMS) {
+			printk(KERN_ERR "SELinux: avtab: entry has extended permissions\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
+			if (val & spec_order[i]) {
+				key.specified = spec_order[i] | enabled;
+				datum.u.data = le32_to_cpu(buf32[items++]);
+				rc = insertf(a, &key, &datum, p);
+				if (rc)
+					return rc;
+			}
+		}
+
+		if (items != items2) {
+			printk(KERN_ERR "SELinux: avtab: entry only had %d items, expected %d\n", items2, items);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	rc = next_entry(buf16, fp, sizeof(u16)*4);
+	if (rc) {
+		printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+		return rc;
+	}
+
+	items = 0;
+	key.source_type = le16_to_cpu(buf16[items++]);
+	key.target_type = le16_to_cpu(buf16[items++]);
+	key.target_class = le16_to_cpu(buf16[items++]);
+	key.specified = le16_to_cpu(buf16[items++]);
+
+	if (!policydb_type_isvalid(pol, key.source_type) ||
+	    !policydb_type_isvalid(pol, key.target_type) ||
+	    !policydb_class_isvalid(pol, key.target_class)) {
+		printk(KERN_ERR "SELinux: avtab: invalid type or class\n");
+		return -EINVAL;
+	}
+
+	set = 0;
+	for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
+		if (key.specified & spec_order[i])
+			set++;
+	}
+	if (!set || set > 1) {
+		printk(KERN_ERR "SELinux:  avtab:  expected exactly one specifier\n");
+		return -EINVAL;
+	}
+
+	if ((vers < POLICYDB_VERSION_XPERMS_IOCTL) &&
+			(key.specified & AVTAB_XPERMS)) {
+		printk(KERN_ERR "SELinux:  avtab:  policy version %u does not "
+				"support extended permissions rules and one "
+				"was specified\n", vers);
+		return -EINVAL;
+	} else if (key.specified & AVTAB_XPERMS) {
+		memset(&xperms, 0, sizeof(struct avtab_extended_perms));
+		rc = next_entry(&xperms.specified, fp, sizeof(u8));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		rc = next_entry(&xperms.driver, fp, sizeof(u8));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(xperms.perms.p));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		for (i = 0; i < ARRAY_SIZE(xperms.perms.p); i++)
+			xperms.perms.p[i] = le32_to_cpu(buf32[i]);
+		datum.u.xperms = &xperms;
+	} else {
+		rc = next_entry(buf32, fp, sizeof(u32));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		datum.u.data = le32_to_cpu(*buf32);
+	}
+	if ((key.specified & AVTAB_TYPE) &&
+	    !policydb_type_isvalid(pol, datum.u.data)) {
+		printk(KERN_ERR "SELinux: avtab: invalid type\n");
+		return -EINVAL;
+	}
+	return insertf(a, &key, &datum, p);
+}
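+
+/*
+ * Sketch of the on-disk entry layout parsed above for policy versions
+ * >= POLICYDB_VERSION_AVTAB (all fields little-endian; widths follow
+ * the code):
+ *
+ *	u16 source_type; u16 target_type; u16 target_class; u16 specified;
+ *	if (specified & AVTAB_XPERMS):
+ *		u8 xperms.specified; u8 xperms.driver; u32 perms[8]
+ *	else:
+ *		u32 data	(access vector or type value)
+ */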
+
+static int avtab_insertf(struct avtab *a, struct avtab_key *k,
+			 struct avtab_datum *d, void *p)
+{
+	return avtab_insert(a, k, d);
+}
+
+int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
+{
+	int rc;
+	__le32 buf[1];
+	u32 nel, i;
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc < 0) {
+		printk(KERN_ERR "SELinux: avtab: truncated table\n");
+		goto bad;
+	}
+	nel = le32_to_cpu(buf[0]);
+	if (!nel) {
+		printk(KERN_ERR "SELinux: avtab: table is empty\n");
+		rc = -EINVAL;
+		goto bad;
+	}
+
+	rc = avtab_alloc(a, nel);
+	if (rc)
+		goto bad;
+
+	for (i = 0; i < nel; i++) {
+		rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL);
+		if (rc) {
+			if (rc == -ENOMEM)
+				printk(KERN_ERR "SELinux: avtab: out of memory\n");
+			else if (rc == -EEXIST)
+				printk(KERN_ERR "SELinux: avtab: duplicate entry\n");
+
+			goto bad;
+		}
+	}
+
+	rc = 0;
+out:
+	return rc;
+
+bad:
+	avtab_destroy(a);
+	goto out;
+}
+
+int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
+{
+	__le16 buf16[4];
+	__le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)];
+	int rc;
+	unsigned int i;
+
+	buf16[0] = cpu_to_le16(cur->key.source_type);
+	buf16[1] = cpu_to_le16(cur->key.target_type);
+	buf16[2] = cpu_to_le16(cur->key.target_class);
+	buf16[3] = cpu_to_le16(cur->key.specified);
+	rc = put_entry(buf16, sizeof(u16), 4, fp);
+	if (rc)
+		return rc;
+
+	if (cur->key.specified & AVTAB_XPERMS) {
+		rc = put_entry(&cur->datum.u.xperms->specified, sizeof(u8), 1, fp);
+		if (rc)
+			return rc;
+		rc = put_entry(&cur->datum.u.xperms->driver, sizeof(u8), 1, fp);
+		if (rc)
+			return rc;
+		for (i = 0; i < ARRAY_SIZE(cur->datum.u.xperms->perms.p); i++)
+			buf32[i] = cpu_to_le32(cur->datum.u.xperms->perms.p[i]);
+		rc = put_entry(buf32, sizeof(u32),
+				ARRAY_SIZE(cur->datum.u.xperms->perms.p), fp);
+	} else {
+		buf32[0] = cpu_to_le32(cur->datum.u.data);
+		rc = put_entry(buf32, sizeof(u32), 1, fp);
+	}
+	if (rc)
+		return rc;
+	return 0;
+}
+
+int avtab_write(struct policydb *p, struct avtab *a, void *fp)
+{
+	unsigned int i;
+	int rc = 0;
+	struct avtab_node *cur;
+	__le32 buf[1];
+
+	buf[0] = cpu_to_le32(a->nel);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < a->nslot; i++) {
+		for (cur = flex_array_get_ptr(a->htable, i); cur;
+		     cur = cur->next) {
+			rc = avtab_write_item(p, cur, fp);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return rc;
+}
+
+void avtab_cache_init(void)
+{
+	avtab_node_cachep = kmem_cache_create("avtab_node",
+					      sizeof(struct avtab_node),
+					      0, SLAB_PANIC, NULL);
+	avtab_xperms_cachep = kmem_cache_create("avtab_extended_perms",
+						sizeof(struct avtab_extended_perms),
+						0, SLAB_PANIC, NULL);
+}
+
+void avtab_cache_destroy(void)
+{
+	kmem_cache_destroy(avtab_node_cachep);
+	kmem_cache_destroy(avtab_xperms_cachep);
+}
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
new file mode 100644
index 0000000..d946c9d
--- /dev/null
+++ b/security/selinux/ss/avtab.h
@@ -0,0 +1,124 @@
+/*
+ * An access vector table (avtab) is a hash table
+ * of access vectors and transition types indexed
+ * by a type pair and a class.  An access vector
+ * table is used to represent the type enforcement
+ * tables.
+ *
+ *  Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+
+/* Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * 	Added conditional policy language extensions
+ *
+ * Copyright (C) 2003 Tresys Technology, LLC
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ *
+ * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
+ * 	Tuned number of hash slots for avtab to reduce memory usage
+ */
+#ifndef _SS_AVTAB_H_
+#define _SS_AVTAB_H_
+
+#include "security.h"
+#include <linux/flex_array.h>
+
+struct avtab_key {
+	u16 source_type;	/* source type */
+	u16 target_type;	/* target type */
+	u16 target_class;	/* target object class */
+#define AVTAB_ALLOWED		0x0001
+#define AVTAB_AUDITALLOW	0x0002
+#define AVTAB_AUDITDENY		0x0004
+#define AVTAB_AV		(AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY)
+#define AVTAB_TRANSITION	0x0010
+#define AVTAB_MEMBER		0x0020
+#define AVTAB_CHANGE		0x0040
+#define AVTAB_TYPE		(AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
+/* extended permissions */
+#define AVTAB_XPERMS_ALLOWED	0x0100
+#define AVTAB_XPERMS_AUDITALLOW	0x0200
+#define AVTAB_XPERMS_DONTAUDIT	0x0400
+#define AVTAB_XPERMS		(AVTAB_XPERMS_ALLOWED | \
+				AVTAB_XPERMS_AUDITALLOW | \
+				AVTAB_XPERMS_DONTAUDIT)
+#define AVTAB_ENABLED_OLD   0x80000000 /* reserved for use in cond_avtab */
+#define AVTAB_ENABLED		0x8000 /* reserved for use in cond_avtab */
+	u16 specified;	/* what field is specified */
+};
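+
+/*
+ * Example (illustrative values): an allow rule from type 10 to type 20
+ * on class 2 would be keyed as
+ *
+ *	struct avtab_key key = {
+ *		.source_type  = 10,
+ *		.target_type  = 20,
+ *		.target_class = 2,
+ *		.specified    = AVTAB_ALLOWED,
+ *	};
+ *
+ * with AVTAB_ENABLED or'ed into 'specified' by the conditional code
+ * while the rule is active.
+ */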
+
+/*
+ * For operations that require more than the 32 permissions provided by the
+ * avc, extended permissions may be used to provide 256 bits of permissions.
+ */
+struct avtab_extended_perms {
+/* These are not flags. All 256 values may be used */
+#define AVTAB_XPERMS_IOCTLFUNCTION	0x01
+#define AVTAB_XPERMS_IOCTLDRIVER	0x02
+	/* extension of the avtab_key's specified field */
+	u8 specified; /* ioctl, netfilter, ... */
+	/*
+	 * if 256 bits is not adequate as is often the case with ioctls, then
+	 * multiple extended perms may be used and the driver field
+	 * specifies which permissions are included.
+	 */
+	u8 driver;
+	/* 256 bits of permissions */
+	struct extended_perms_data perms;
+};
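+
+/*
+ * Example (sketch, illustrative numbers): for ioctl extended
+ * permissions the low 16 bits of the ioctl command are split into a
+ * driver byte and a function byte, so command 0x8910 falls under
+ * driver 0x89, function 0x10.  An AVTAB_XPERMS_IOCTLFUNCTION entry
+ * with driver == 0x89 then carries one bit per function in 'perms',
+ * while an AVTAB_XPERMS_IOCTLDRIVER entry carries one bit per driver
+ * value.
+ */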
+
+struct avtab_datum {
+	union {
+		u32 data; /* access vector or type value */
+		struct avtab_extended_perms *xperms;
+	} u;
+};
+
+struct avtab_node {
+	struct avtab_key key;
+	struct avtab_datum datum;
+	struct avtab_node *next;
+};
+
+struct avtab {
+	struct flex_array *htable;
+	u32 nel;	/* number of elements */
+	u32 nslot;      /* number of hash slots */
+	u32 mask;       /* mask to compute hash func */
+};
+
+int avtab_init(struct avtab *);
+int avtab_alloc(struct avtab *, u32);
+struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
+void avtab_destroy(struct avtab *h);
+void avtab_hash_eval(struct avtab *h, char *tag);
+
+struct policydb;
+int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
+		    int (*insert)(struct avtab *a, struct avtab_key *k,
+				  struct avtab_datum *d, void *p),
+		    void *p);
+
+int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
+int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
+int avtab_write(struct policydb *p, struct avtab *a, void *fp);
+
+struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key,
+					  struct avtab_datum *datum);
+
+struct avtab_node *avtab_search_node(struct avtab *h, struct avtab_key *key);
+
+struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified);
+
+void avtab_cache_init(void);
+void avtab_cache_destroy(void);
+
+#define MAX_AVTAB_HASH_BITS 16
+#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
+
+#endif	/* _SS_AVTAB_H_ */
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
new file mode 100644
index 0000000..456e1a9
--- /dev/null
+++ b/security/selinux/ss/conditional.c
@@ -0,0 +1,665 @@
+/* Authors: Karl MacMillan <kmacmillan@tresys.com>
+ *	    Frank Mayer <mayerf@tresys.com>
+ *
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include "security.h"
+#include "conditional.h"
+#include "services.h"
+
+/*
+ * cond_evaluate_expr evaluates a conditional expr
+ * in reverse polish notation. It returns true (1), false (0),
+ * or undefined (-1). Undefined occurs when the expression
+ * exceeds the stack depth of COND_EXPR_MAXDEPTH.
+ */
+static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr)
+{
+	struct cond_expr *cur;
+	int s[COND_EXPR_MAXDEPTH];
+	int sp = -1;
+
+	for (cur = expr; cur; cur = cur->next) {
+		switch (cur->expr_type) {
+		case COND_BOOL:
+			if (sp == (COND_EXPR_MAXDEPTH - 1))
+				return -1;
+			sp++;
+			s[sp] = p->bool_val_to_struct[cur->bool - 1]->state;
+			break;
+		case COND_NOT:
+			if (sp < 0)
+				return -1;
+			s[sp] = !s[sp];
+			break;
+		case COND_OR:
+			if (sp < 1)
+				return -1;
+			sp--;
+			s[sp] |= s[sp + 1];
+			break;
+		case COND_AND:
+			if (sp < 1)
+				return -1;
+			sp--;
+			s[sp] &= s[sp + 1];
+			break;
+		case COND_XOR:
+			if (sp < 1)
+				return -1;
+			sp--;
+			s[sp] ^= s[sp + 1];
+			break;
+		case COND_EQ:
+			if (sp < 1)
+				return -1;
+			sp--;
+			s[sp] = (s[sp] == s[sp + 1]);
+			break;
+		case COND_NEQ:
+			if (sp < 1)
+				return -1;
+			sp--;
+			s[sp] = (s[sp] != s[sp + 1]);
+			break;
+		default:
+			return -1;
+		}
+	}
+	return s[0];
+}
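+
+/*
+ * Example (illustrative): the expression "a && !b" arrives as the RPN
+ * list [COND_BOOL a] [COND_BOOL b] [COND_NOT] [COND_AND] and is
+ * evaluated as
+ *
+ *	push a	stack: [a]
+ *	push b	stack: [a, b]
+ *	NOT	stack: [a, !b]
+ *	AND	stack: [a && !b]
+ *
+ * leaving the result in s[0].
+ */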
+
+/*
+ * evaluate_cond_node evaluates the conditional stored in
+ * a struct cond_node and if the result is different than the
+ * current state of the node it sets the rules in the true/false
+ * list appropriately. If the result of the expression is undefined
+ * all of the rules are disabled for safety.
+ */
+int evaluate_cond_node(struct policydb *p, struct cond_node *node)
+{
+	int new_state;
+	struct cond_av_list *cur;
+
+	new_state = cond_evaluate_expr(p, node->expr);
+	if (new_state != node->cur_state) {
+		node->cur_state = new_state;
+		if (new_state == -1)
+			printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n");
+		/* turn the rules on or off */
+		for (cur = node->true_list; cur; cur = cur->next) {
+			if (new_state <= 0)
+				cur->node->key.specified &= ~AVTAB_ENABLED;
+			else
+				cur->node->key.specified |= AVTAB_ENABLED;
+		}
+
+		for (cur = node->false_list; cur; cur = cur->next) {
+			/* -1 or 1 */
+			if (new_state)
+				cur->node->key.specified &= ~AVTAB_ENABLED;
+			else
+				cur->node->key.specified |= AVTAB_ENABLED;
+		}
+	}
+	return 0;
+}
+
+int cond_policydb_init(struct policydb *p)
+{
+	int rc;
+
+	p->bool_val_to_struct = NULL;
+	p->cond_list = NULL;
+
+	rc = avtab_init(&p->te_cond_avtab);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static void cond_av_list_destroy(struct cond_av_list *list)
+{
+	struct cond_av_list *cur, *next;
+	for (cur = list; cur; cur = next) {
+		next = cur->next;
+		/* the avtab_ptr_t node is destroyed by the avtab */
+		kfree(cur);
+	}
+}
+
+static void cond_node_destroy(struct cond_node *node)
+{
+	struct cond_expr *cur_expr, *next_expr;
+
+	for (cur_expr = node->expr; cur_expr; cur_expr = next_expr) {
+		next_expr = cur_expr->next;
+		kfree(cur_expr);
+	}
+	cond_av_list_destroy(node->true_list);
+	cond_av_list_destroy(node->false_list);
+	kfree(node);
+}
+
+static void cond_list_destroy(struct cond_node *list)
+{
+	struct cond_node *next, *cur;
+
+	if (list == NULL)
+		return;
+
+	for (cur = list; cur; cur = next) {
+		next = cur->next;
+		cond_node_destroy(cur);
+	}
+}
+
+void cond_policydb_destroy(struct policydb *p)
+{
+	kfree(p->bool_val_to_struct);
+	avtab_destroy(&p->te_cond_avtab);
+	cond_list_destroy(p->cond_list);
+}
+
+int cond_init_bool_indexes(struct policydb *p)
+{
+	kfree(p->bool_val_to_struct);
+	p->bool_val_to_struct =
+		kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
+	if (!p->bool_val_to_struct)
+		return -ENOMEM;
+	return 0;
+}
+
+int cond_destroy_bool(void *key, void *datum, void *p)
+{
+	kfree(key);
+	kfree(datum);
+	return 0;
+}
+
+int cond_index_bool(void *key, void *datum, void *datap)
+{
+	struct policydb *p;
+	struct cond_bool_datum *booldatum;
+	struct flex_array *fa;
+
+	booldatum = datum;
+	p = datap;
+
+	if (!booldatum->value || booldatum->value > p->p_bools.nprim)
+		return -EINVAL;
+
+	fa = p->sym_val_to_name[SYM_BOOLS];
+	if (flex_array_put_ptr(fa, booldatum->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
+	p->bool_val_to_struct[booldatum->value - 1] = booldatum;
+
+	return 0;
+}
+
+static int bool_isvalid(struct cond_bool_datum *b)
+{
+	if (!(b->state == 0 || b->state == 1))
+		return 0;
+	return 1;
+}
+
+int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct cond_bool_datum *booldatum;
+	__le32 buf[3];
+	u32 len;
+	int rc;
+
+	booldatum = kzalloc(sizeof(struct cond_bool_datum), GFP_KERNEL);
+	if (!booldatum)
+		return -ENOMEM;
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc)
+		goto err;
+
+	booldatum->value = le32_to_cpu(buf[0]);
+	booldatum->state = le32_to_cpu(buf[1]);
+
+	rc = -EINVAL;
+	if (!bool_isvalid(booldatum))
+		goto err;
+
+	len = le32_to_cpu(buf[2]);
+
+	rc = -ENOMEM;
+	key = kmalloc(len + 1, GFP_KERNEL);
+	if (!key)
+		goto err;
+	rc = next_entry(key, fp, len);
+	if (rc)
+		goto err;
+	key[len] = '\0';
+	rc = hashtab_insert(h, key, booldatum);
+	if (rc)
+		goto err;
+
+	return 0;
+err:
+	cond_destroy_bool(key, booldatum, NULL);
+	return rc;
+}
+
+struct cond_insertf_data {
+	struct policydb *p;
+	struct cond_av_list *other;
+	struct cond_av_list *head;
+	struct cond_av_list *tail;
+};
+
+static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum *d, void *ptr)
+{
+	struct cond_insertf_data *data = ptr;
+	struct policydb *p = data->p;
+	struct cond_av_list *other = data->other, *list, *cur;
+	struct avtab_node *node_ptr;
+	u8 found;
+	int rc = -EINVAL;
+
+	/*
+	 * For type rules we have to make certain there aren't any
+	 * conflicting rules by searching the te_avtab and the
+	 * cond_te_avtab.
+	 */
+	if (k->specified & AVTAB_TYPE) {
+		if (avtab_search(&p->te_avtab, k)) {
+			printk(KERN_ERR "SELinux: type rule already exists outside of a conditional.\n");
+			goto err;
+		}
+		/*
+		 * If we are reading the false list other will be a pointer to
+		 * the true list. We can have duplicate entries if there is only
+		 * 1 other entry and it is in our true list.
+		 *
+		 * If we are reading the true list (other == NULL) there shouldn't
+		 * be any other entries.
+		 */
+		if (other) {
+			node_ptr = avtab_search_node(&p->te_cond_avtab, k);
+			if (node_ptr) {
+				if (avtab_search_node_next(node_ptr, k->specified)) {
+					printk(KERN_ERR "SELinux: too many conflicting type rules.\n");
+					goto err;
+				}
+				found = 0;
+				for (cur = other; cur; cur = cur->next) {
+					if (cur->node == node_ptr) {
+						found = 1;
+						break;
+					}
+				}
+				if (!found) {
+					printk(KERN_ERR "SELinux: conflicting type rules.\n");
+					goto err;
+				}
+			}
+		} else {
+			if (avtab_search(&p->te_cond_avtab, k)) {
+				printk(KERN_ERR "SELinux: conflicting type rules when adding type rule for true.\n");
+				goto err;
+			}
+		}
+	}
+
+	node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
+	if (!node_ptr) {
+		printk(KERN_ERR "SELinux: could not insert rule.\n");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	list = kzalloc(sizeof(struct cond_av_list), GFP_KERNEL);
+	if (!list) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	list->node = node_ptr;
+	if (!data->head)
+		data->head = list;
+	else
+		data->tail->next = list;
+	data->tail = list;
+	return 0;
+
+err:
+	cond_av_list_destroy(data->head);
+	data->head = NULL;
+	return rc;
+}
+
+static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list **ret_list, struct cond_av_list *other)
+{
+	int i, rc;
+	__le32 buf[1];
+	u32 len;
+	struct cond_insertf_data data;
+
+	*ret_list = NULL;
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc)
+		return rc;
+
+	len = le32_to_cpu(buf[0]);
+	if (len == 0)
+		return 0;
+
+	data.p = p;
+	data.other = other;
+	data.head = NULL;
+	data.tail = NULL;
+	for (i = 0; i < len; i++) {
+		rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf,
+				     &data);
+		if (rc)
+			return rc;
+	}
+
+	*ret_list = data.head;
+	return 0;
+}
+
+static int expr_isvalid(struct policydb *p, struct cond_expr *expr)
+{
+	if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) {
+		printk(KERN_ERR "SELinux: conditional expressions uses unknown operator.\n");
+		return 0;
+	}
+
+	if (expr->bool > p->p_bools.nprim) {
+		printk(KERN_ERR "SELinux: conditional expressions uses unknown bool.\n");
+		return 0;
+	}
+	return 1;
+}
+
+static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
+{
+	__le32 buf[2];
+	u32 len, i;
+	int rc;
+	struct cond_expr *expr = NULL, *last = NULL;
+
+	rc = next_entry(buf, fp, sizeof(u32) * 2);
+	if (rc)
+		goto err;
+
+	node->cur_state = le32_to_cpu(buf[0]);
+
+	/* expr */
+	len = le32_to_cpu(buf[1]);
+
+	for (i = 0; i < len; i++) {
+		rc = next_entry(buf, fp, sizeof(u32) * 2);
+		if (rc)
+			goto err;
+
+		rc = -ENOMEM;
+		expr = kzalloc(sizeof(struct cond_expr), GFP_KERNEL);
+		if (!expr)
+			goto err;
+
+		expr->expr_type = le32_to_cpu(buf[0]);
+		expr->bool = le32_to_cpu(buf[1]);
+
+		if (!expr_isvalid(p, expr)) {
+			rc = -EINVAL;
+			kfree(expr);
+			goto err;
+		}
+
+		if (i == 0)
+			node->expr = expr;
+		else
+			last->next = expr;
+		last = expr;
+	}
+
+	rc = cond_read_av_list(p, fp, &node->true_list, NULL);
+	if (rc)
+		goto err;
+	rc = cond_read_av_list(p, fp, &node->false_list, node->true_list);
+	if (rc)
+		goto err;
+	return 0;
+err:
+	cond_node_destroy(node);
+	return rc;
+}
+
+int cond_read_list(struct policydb *p, void *fp)
+{
+	struct cond_node *node, *last = NULL;
+	__le32 buf[1];
+	u32 i, len;
+	int rc;
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc)
+		return rc;
+
+	len = le32_to_cpu(buf[0]);
+
+	rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
+	if (rc)
+		goto err;
+
+	for (i = 0; i < len; i++) {
+		rc = -ENOMEM;
+		node = kzalloc(sizeof(struct cond_node), GFP_KERNEL);
+		if (!node)
+			goto err;
+
+		rc = cond_read_node(p, node, fp);
+		if (rc)
+			goto err;
+
+		if (i == 0)
+			p->cond_list = node;
+		else
+			last->next = node;
+		last = node;
+	}
+	return 0;
+err:
+	cond_list_destroy(p->cond_list);
+	p->cond_list = NULL;
+	return rc;
+}
+
+int cond_write_bool(void *vkey, void *datum, void *ptr)
+{
+	char *key = vkey;
+	struct cond_bool_datum *booldatum = datum;
+	struct policy_data *pd = ptr;
+	void *fp = pd->fp;
+	__le32 buf[3];
+	u32 len;
+	int rc;
+
+	len = strlen(key);
+	buf[0] = cpu_to_le32(booldatum->value);
+	buf[1] = cpu_to_le32(booldatum->state);
+	buf[2] = cpu_to_le32(len);
+	rc = put_entry(buf, sizeof(u32), 3, fp);
+	if (rc)
+		return rc;
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+	return 0;
+}
+
+/*
+ * cond_write_av_list doesn't write out the av_list nodes.
+ * Instead it writes out the key/value pairs from the avtab. This
+ * is necessary because there is no way to uniquely identify rules
+ * in the avtab, so it is not possible to associate individual rules
+ * in the avtab with a conditional without saving them as part of
+ * the conditional. This means that the avtab with the conditional
+ * rules will not be saved but will be rebuilt on policy load.
+ */
+static int cond_write_av_list(struct policydb *p,
+			      struct cond_av_list *list, struct policy_file *fp)
+{
+	__le32 buf[1];
+	struct cond_av_list *cur_list;
+	u32 len;
+	int rc;
+
+	len = 0;
+	for (cur_list = list; cur_list != NULL; cur_list = cur_list->next)
+		len++;
+
+	buf[0] = cpu_to_le32(len);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	if (len == 0)
+		return 0;
+
+	for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) {
+		rc = avtab_write_item(p, cur_list->node, fp);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int cond_write_node(struct policydb *p, struct cond_node *node,
+		    struct policy_file *fp)
+{
+	struct cond_expr *cur_expr;
+	__le32 buf[2];
+	int rc;
+	u32 len = 0;
+
+	buf[0] = cpu_to_le32(node->cur_state);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next)
+		len++;
+
+	buf[0] = cpu_to_le32(len);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) {
+		buf[0] = cpu_to_le32(cur_expr->expr_type);
+		buf[1] = cpu_to_le32(cur_expr->bool);
+		rc = put_entry(buf, sizeof(u32), 2, fp);
+		if (rc)
+			return rc;
+	}
+
+	rc = cond_write_av_list(p, node->true_list, fp);
+	if (rc)
+		return rc;
+	rc = cond_write_av_list(p, node->false_list, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
+{
+	struct cond_node *cur;
+	u32 len;
+	__le32 buf[1];
+	int rc;
+
+	len = 0;
+	for (cur = list; cur != NULL; cur = cur->next)
+		len++;
+	buf[0] = cpu_to_le32(len);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	for (cur = list; cur != NULL; cur = cur->next) {
+		rc = cond_write_node(p, cur, fp);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key,
+		struct extended_perms_decision *xpermd)
+{
+	struct avtab_node *node;
+
+	if (!ctab || !key || !xpermd)
+		return;
+
+	for (node = avtab_search_node(ctab, key); node;
+			node = avtab_search_node_next(node, key->specified)) {
+		if (node->key.specified & AVTAB_ENABLED)
+			services_compute_xperms_decision(xpermd, node);
+	}
+}
+
+/* Determine whether additional permissions are granted by the conditional
+ * av table, and if so, add them to the result
+ */
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+		struct av_decision *avd, struct extended_perms *xperms)
+{
+	struct avtab_node *node;
+
+	if (!ctab || !key || !avd)
+		return;
+
+	for (node = avtab_search_node(ctab, key); node;
+				node = avtab_search_node_next(node, key->specified)) {
+		if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) ==
+		    (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)))
+			avd->allowed |= node->datum.u.data;
+		if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) ==
+		    (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED)))
+			/* Since a '0' in an auditdeny mask represents a
+			 * permission we do NOT want to audit (dontaudit), we use
+			 * the '&' operand to ensure that all '0's in the mask
+			 * are retained (much unlike the allow and auditallow cases).
+			 */
+			avd->auditdeny &= node->datum.u.data;
+		if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
+		    (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
+			avd->auditallow |= node->datum.u.data;
+		if (xperms && (node->key.specified & AVTAB_ENABLED) &&
+				(node->key.specified & AVTAB_XPERMS))
+			services_compute_xperms_drivers(xperms, node);
+	}
+}
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
new file mode 100644
index 0000000..ddb43e7
--- /dev/null
+++ b/security/selinux/ss/conditional.h
@@ -0,0 +1,82 @@
+/* Authors: Karl MacMillan <kmacmillan@tresys.com>
+ *          Frank Mayer <mayerf@tresys.com>
+ *
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ */
+
+#ifndef _CONDITIONAL_H_
+#define _CONDITIONAL_H_
+
+#include "avtab.h"
+#include "symtab.h"
+#include "policydb.h"
+#include "../include/conditional.h"
+
+#define COND_EXPR_MAXDEPTH 10
+
+/*
+ * A conditional expression is a list of operators and operands
+ * in reverse polish notation.
+ */
+struct cond_expr {
+#define COND_BOOL	1 /* plain bool */
+#define COND_NOT	2 /* !bool */
+#define COND_OR		3 /* bool || bool */
+#define COND_AND	4 /* bool && bool */
+#define COND_XOR	5 /* bool ^ bool */
+#define COND_EQ		6 /* bool == bool */
+#define COND_NEQ	7 /* bool != bool */
+#define COND_LAST	COND_NEQ
+	__u32 expr_type;
+	__u32 bool;
+	struct cond_expr *next;
+};
+
+/*
+ * Each cond_node contains a list of rules to be enabled/disabled
+ * depending on the current value of the conditional expression. This
+ * struct is for that list.
+ */
+struct cond_av_list {
+	struct avtab_node *node;
+	struct cond_av_list *next;
+};
+
+/*
+ * A cond node represents a conditional block in a policy. It
+ * contains a conditional expression, the current state of the expression,
+ * two lists of rules to enable/disable depending on the value of the
+ * expression (the true list corresponds to if and the false list corresponds
+ * to else).
+ */
+struct cond_node {
+	int cur_state;
+	struct cond_expr *expr;
+	struct cond_av_list *true_list;
+	struct cond_av_list *false_list;
+	struct cond_node *next;
+};
+
+int cond_policydb_init(struct policydb *p);
+void cond_policydb_destroy(struct policydb *p);
+
+int cond_init_bool_indexes(struct policydb *p);
+int cond_destroy_bool(void *key, void *datum, void *p);
+
+int cond_index_bool(void *key, void *datum, void *datap);
+
+int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp);
+int cond_read_list(struct policydb *p, void *fp);
+int cond_write_bool(void *key, void *datum, void *ptr);
+int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
+
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+		struct av_decision *avd, struct extended_perms *xperms);
+void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key,
+		struct extended_perms_decision *xpermd);
+int evaluate_cond_node(struct policydb *p, struct cond_node *node);
+
+#endif /* _CONDITIONAL_H_ */
diff --git a/security/selinux/ss/constraint.h b/security/selinux/ss/constraint.h
new file mode 100644
index 0000000..96fd947
--- /dev/null
+++ b/security/selinux/ss/constraint.h
@@ -0,0 +1,62 @@
+/*
+ * A constraint is a condition that must be satisfied in
+ * order for one or more permissions to be granted.
+ * Constraints are used to impose additional restrictions
+ * beyond the type-based rules in `te' or the role-based
+ * transition rules in `rbac'.  Constraints are typically
+ * used to prevent a process from transitioning to a new user
+ * identity or role unless it is in a privileged type.
+ * Constraints are likewise typically used to prevent a
+ * process from labeling an object with a different user
+ * identity.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SS_CONSTRAINT_H_
+#define _SS_CONSTRAINT_H_
+
+#include "ebitmap.h"
+
+#define CEXPR_MAXDEPTH 5
+
+struct constraint_expr {
+#define CEXPR_NOT		1 /* not expr */
+#define CEXPR_AND		2 /* expr and expr */
+#define CEXPR_OR		3 /* expr or expr */
+#define CEXPR_ATTR		4 /* attr op attr */
+#define CEXPR_NAMES		5 /* attr op names */
+	u32 expr_type;		/* expression type */
+
+#define CEXPR_USER 1		/* user */
+#define CEXPR_ROLE 2		/* role */
+#define CEXPR_TYPE 4		/* type */
+#define CEXPR_TARGET 8		/* target if set, source otherwise */
+#define CEXPR_XTARGET 16	/* special 3rd target for validatetrans rule */
+#define CEXPR_L1L2 32		/* low level 1 vs. low level 2 */
+#define CEXPR_L1H2 64		/* low level 1 vs. high level 2 */
+#define CEXPR_H1L2 128		/* high level 1 vs. low level 2 */
+#define CEXPR_H1H2 256		/* high level 1 vs. high level 2 */
+#define CEXPR_L1H1 512		/* low level 1 vs. high level 1 */
+#define CEXPR_L2H2 1024		/* low level 2 vs. high level 2 */
+	u32 attr;		/* attribute */
+
+#define CEXPR_EQ     1		/* == or eq */
+#define CEXPR_NEQ    2		/* != */
+#define CEXPR_DOM    3		/* dom */
+#define CEXPR_DOMBY  4		/* domby  */
+#define CEXPR_INCOMP 5		/* incomp */
+	u32 op;			/* operator */
+
+	struct ebitmap names;	/* names */
+	struct type_set *type_names;
+
+	struct constraint_expr *next;   /* next expression */
+};
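+
+/*
+ * Example (sketch): the policy constraint "u1 == u2" (the subject may
+ * only operate on objects with the same user identity) is a single
+ * node with expr_type == CEXPR_ATTR, attr == CEXPR_USER and
+ * op == CEXPR_EQ; compound constraints chain further nodes through
+ * 'next' in reverse polish notation, bounded by CEXPR_MAXDEPTH.
+ */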
+
+struct constraint_node {
+	u32 permissions;	/* constrained permissions */
+	struct constraint_expr *expr;	/* constraint on permissions */
+	struct constraint_node *next;	/* next constraint */
+};
+
+#endif	/* _SS_CONSTRAINT_H_ */
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
new file mode 100644
index 0000000..212e347
--- /dev/null
+++ b/security/selinux/ss/context.h
@@ -0,0 +1,163 @@
+/*
+ * A security context is a set of security attributes
+ * associated with each subject and object controlled
+ * by the security policy.  Security contexts are
+ * externally represented as variable-length strings
+ * that can be interpreted by a user or application
+ * with an understanding of the security policy.
+ * Internally, the security server uses a simple
+ * structure.  This structure is private to the
+ * security server and can be changed without affecting
+ * clients of the security server.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SS_CONTEXT_H_
+#define _SS_CONTEXT_H_
+
+#include "ebitmap.h"
+#include "mls_types.h"
+#include "security.h"
+
+/*
+ * A security context consists of an authenticated user
+ * identity, a role, a type and a MLS range.
+ */
+struct context {
+	u32 user;
+	u32 role;
+	u32 type;
+	u32 len;        /* length of string in bytes */
+	struct mls_range range;
+	char *str;	/* string representation if context cannot be mapped. */
+};
+
+static inline void mls_context_init(struct context *c)
+{
+	memset(&c->range, 0, sizeof(c->range));
+}
+
+static inline int mls_context_cpy(struct context *dst, struct context *src)
+{
+	int rc;
+
+	dst->range.level[0].sens = src->range.level[0].sens;
+	rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
+	if (rc)
+		goto out;
+
+	dst->range.level[1].sens = src->range.level[1].sens;
+	rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
+	if (rc)
+		ebitmap_destroy(&dst->range.level[0].cat);
+out:
+	return rc;
+}
+
+/*
+ * Sets both levels in the MLS range of 'dst' to the low level of 'src'.
+ */
+static inline int mls_context_cpy_low(struct context *dst, struct context *src)
+{
+	int rc;
+
+	dst->range.level[0].sens = src->range.level[0].sens;
+	rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
+	if (rc)
+		goto out;
+
+	dst->range.level[1].sens = src->range.level[0].sens;
+	rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[0].cat);
+	if (rc)
+		ebitmap_destroy(&dst->range.level[0].cat);
+out:
+	return rc;
+}
+
+/*
+ * Sets both levels in the MLS range of 'dst' to the high level of 'src'.
+ */
+static inline int mls_context_cpy_high(struct context *dst, struct context *src)
+{
+	int rc;
+
+	dst->range.level[0].sens = src->range.level[1].sens;
+	rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat);
+	if (rc)
+		goto out;
+
+	dst->range.level[1].sens = src->range.level[1].sens;
+	rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
+	if (rc)
+		ebitmap_destroy(&dst->range.level[0].cat);
+out:
+	return rc;
+}
+
+static inline int mls_context_cmp(struct context *c1, struct context *c2)
+{
+	return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
+		ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
+		(c1->range.level[1].sens == c2->range.level[1].sens) &&
+		ebitmap_cmp(&c1->range.level[1].cat, &c2->range.level[1].cat));
+}
+
+static inline void mls_context_destroy(struct context *c)
+{
+	ebitmap_destroy(&c->range.level[0].cat);
+	ebitmap_destroy(&c->range.level[1].cat);
+	mls_context_init(c);
+}
+
+static inline void context_init(struct context *c)
+{
+	memset(c, 0, sizeof(*c));
+}
+
+static inline int context_cpy(struct context *dst, struct context *src)
+{
+	int rc;
+
+	dst->user = src->user;
+	dst->role = src->role;
+	dst->type = src->type;
+	if (src->str) {
+		dst->str = kstrdup(src->str, GFP_ATOMIC);
+		if (!dst->str)
+			return -ENOMEM;
+		dst->len = src->len;
+	} else {
+		dst->str = NULL;
+		dst->len = 0;
+	}
+	rc = mls_context_cpy(dst, src);
+	if (rc) {
+		kfree(dst->str);
+		return rc;
+	}
+	return 0;
+}
+
+static inline void context_destroy(struct context *c)
+{
+	c->user = c->role = c->type = 0;
+	kfree(c->str);
+	c->str = NULL;
+	c->len = 0;
+	mls_context_destroy(c);
+}
+
+static inline int context_cmp(struct context *c1, struct context *c2)
+{
+	if (c1->len && c2->len)
+		return (c1->len == c2->len && !strcmp(c1->str, c2->str));
+	if (c1->len || c2->len)
+		return 0;
+	return ((c1->user == c2->user) &&
+		(c1->role == c2->role) &&
+		(c1->type == c2->type) &&
+		mls_context_cmp(c1, c2));
+}
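+
+/*
+ * Note on context_cmp(): contexts that could not be mapped keep only
+ * their string form (len != 0), so two unmapped contexts compare by
+ * string, a mapped and an unmapped context never compare equal, and
+ * two mapped contexts compare field by field.
+ */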
+
+#endif	/* _SS_CONTEXT_H_ */
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
new file mode 100644
index 0000000..57644b1
--- /dev/null
+++ b/security/selinux/ss/ebitmap.c
@@ -0,0 +1,518 @@
+/*
+ * Implementation of the extensible bitmap type.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+/*
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ *      Added support to import/export the NetLabel category bitmap
+ *
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ */
+/*
+ * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *      Applied standard bit operations to improve bitmap scanning.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <net/netlabel.h>
+#include "ebitmap.h"
+#include "policydb.h"
+
+#define BITS_PER_U64	(sizeof(u64) * 8)
+
+int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
+{
+	struct ebitmap_node *n1, *n2;
+
+	if (e1->highbit != e2->highbit)
+		return 0;
+
+	n1 = e1->node;
+	n2 = e2->node;
+	while (n1 && n2 &&
+	       (n1->startbit == n2->startbit) &&
+	       !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
+		n1 = n1->next;
+		n2 = n2->next;
+	}
+
+	if (n1 || n2)
+		return 0;
+
+	return 1;
+}
+
+int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
+{
+	struct ebitmap_node *n, *new, *prev;
+
+	ebitmap_init(dst);
+	n = src->node;
+	prev = NULL;
+	while (n) {
+		new = kzalloc(sizeof(*new), GFP_ATOMIC);
+		if (!new) {
+			ebitmap_destroy(dst);
+			return -ENOMEM;
+		}
+		new->startbit = n->startbit;
+		memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
+		new->next = NULL;
+		if (prev)
+			prev->next = new;
+		else
+			dst->node = new;
+		prev = new;
+		n = n->next;
+	}
+
+	dst->highbit = src->highbit;
+	return 0;
+}
+
+#ifdef CONFIG_NETLABEL
+/**
+ * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
+ * @ebmap: the ebitmap to export
+ * @catmap: the NetLabel category bitmap
+ *
+ * Description:
+ * Export a SELinux extensible bitmap into a NetLabel category bitmap.
+ * Returns zero on success, negative values on error.
+ *
+ */
+int ebitmap_netlbl_export(struct ebitmap *ebmap,
+			  struct netlbl_lsm_catmap **catmap)
+{
+	struct ebitmap_node *e_iter = ebmap->node;
+	unsigned long e_map;
+	u32 offset;
+	unsigned int iter;
+	int rc;
+
+	if (e_iter == NULL) {
+		*catmap = NULL;
+		return 0;
+	}
+
+	if (*catmap != NULL)
+		netlbl_catmap_free(*catmap);
+	*catmap = NULL;
+
+	while (e_iter) {
+		offset = e_iter->startbit;
+		for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
+			e_map = e_iter->maps[iter];
+			if (e_map != 0) {
+				rc = netlbl_catmap_setlong(catmap,
+							   offset,
+							   e_map,
+							   GFP_ATOMIC);
+				if (rc != 0)
+					goto netlbl_export_failure;
+			}
+			offset += EBITMAP_UNIT_SIZE;
+		}
+		e_iter = e_iter->next;
+	}
+
+	return 0;
+
+netlbl_export_failure:
+	netlbl_catmap_free(*catmap);
+	return -ENOMEM;
+}
+
+/**
+ * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
+ * @ebmap: the ebitmap to import
+ * @catmap: the NetLabel category bitmap
+ *
+ * Description:
+ * Import a NetLabel category bitmap into a SELinux extensible bitmap.
+ * Returns zero on success, negative values on error.
+ *
+ */
+int ebitmap_netlbl_import(struct ebitmap *ebmap,
+			  struct netlbl_lsm_catmap *catmap)
+{
+	int rc;
+	struct ebitmap_node *e_iter = NULL;
+	struct ebitmap_node *e_prev = NULL;
+	u32 offset = 0, idx;
+	unsigned long bitmap;
+
+	for (;;) {
+		rc = netlbl_catmap_getlong(catmap, &offset, &bitmap);
+		if (rc < 0)
+			goto netlbl_import_failure;
+		if (offset == (u32)-1)
+			return 0;
+
+		/* don't waste ebitmap space if the netlabel bitmap is empty */
+		if (bitmap == 0) {
+			offset += EBITMAP_UNIT_SIZE;
+			continue;
+		}
+
+		if (e_iter == NULL ||
+		    offset >= e_iter->startbit + EBITMAP_SIZE) {
+			e_prev = e_iter;
+			e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
+			if (e_iter == NULL)
+				goto netlbl_import_failure;
+			e_iter->startbit = offset & ~(EBITMAP_SIZE - 1);
+			if (e_prev == NULL)
+				ebmap->node = e_iter;
+			else
+				e_prev->next = e_iter;
+			ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
+		}
+
+		/* offset will always be aligned to an unsigned long */
+		idx = EBITMAP_NODE_INDEX(e_iter, offset);
+		e_iter->maps[idx] = bitmap;
+
+		/* next */
+		offset += EBITMAP_UNIT_SIZE;
+	}
+
+	/* NOTE: we should never reach this return */
+	return 0;
+
+netlbl_import_failure:
+	ebitmap_destroy(ebmap);
+	return -ENOMEM;
+}
+#endif /* CONFIG_NETLABEL */
+
+/*
+ * Check to see if all the bits set in e2 are also set in e1. Optionally,
+ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
+ * last_e2bit.
+ */
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
+{
+	struct ebitmap_node *n1, *n2;
+	int i;
+
+	if (e1->highbit < e2->highbit)
+		return 0;
+
+	n1 = e1->node;
+	n2 = e2->node;
+
+	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
+		if (n1->startbit < n2->startbit) {
+			n1 = n1->next;
+			continue;
+		}
+		for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
+			i--;	/* Skip trailing NULL map entries */
+		if (last_e2bit && (i >= 0)) {
+			u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
+					 __fls(n2->maps[i]);
+			if (lastsetbit > last_e2bit)
+				return 0;
+		}
+
+		while (i >= 0) {
+			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
+				return 0;
+			i--;
+		}
+
+		n1 = n1->next;
+		n2 = n2->next;
+	}
+
+	if (n2)
+		return 0;
+
+	return 1;
+}
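+
+/*
+ * Example (illustrative): with bits {1, 3, 5} set in e1 and {3, 5} set
+ * in e2, ebitmap_contains(e1, e2, 0) returns 1; with last_e2bit == 4
+ * it returns 0, since e2's highest set bit (5) exceeds the limit.
+ */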
+
+int ebitmap_get_bit(struct ebitmap *e, unsigned long bit)
+{
+	struct ebitmap_node *n;
+
+	if (e->highbit < bit)
+		return 0;
+
+	n = e->node;
+	while (n && (n->startbit <= bit)) {
+		if ((n->startbit + EBITMAP_SIZE) > bit)
+			return ebitmap_node_get_bit(n, bit);
+		n = n->next;
+	}
+
+	return 0;
+}
+
+int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
+{
+	struct ebitmap_node *n, *prev, *new;
+
+	prev = NULL;
+	n = e->node;
+	while (n && n->startbit <= bit) {
+		if ((n->startbit + EBITMAP_SIZE) > bit) {
+			if (value) {
+				ebitmap_node_set_bit(n, bit);
+			} else {
+				unsigned int s;
+
+				ebitmap_node_clr_bit(n, bit);
+
+				s = find_first_bit(n->maps, EBITMAP_SIZE);
+				if (s < EBITMAP_SIZE)
+					return 0;
+
+				/* drop this node from the bitmap */
+				if (!n->next) {
+					/*
+					 * this was the highest map
+					 * within the bitmap
+					 */
+					if (prev)
+						e->highbit = prev->startbit
+							     + EBITMAP_SIZE;
+					else
+						e->highbit = 0;
+				}
+				if (prev)
+					prev->next = n->next;
+				else
+					e->node = n->next;
+				kfree(n);
+			}
+			return 0;
+		}
+		prev = n;
+		n = n->next;
+	}
+
+	if (!value)
+		return 0;
+
+	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	new->startbit = bit - (bit % EBITMAP_SIZE);
+	ebitmap_node_set_bit(new, bit);
+
+	if (!n)
+		/* this node will be the highest map within the bitmap */
+		e->highbit = new->startbit + EBITMAP_SIZE;
+
+	if (prev) {
+		new->next = prev->next;
+		prev->next = new;
+	} else {
+		new->next = e->node;
+		e->node = new;
+	}
+
+	return 0;
+}
+
+void ebitmap_destroy(struct ebitmap *e)
+{
+	struct ebitmap_node *n, *temp;
+
+	if (!e)
+		return;
+
+	n = e->node;
+	while (n) {
+		temp = n;
+		n = n->next;
+		kfree(temp);
+	}
+
+	e->highbit = 0;
+	e->node = NULL;
+}
+
+int ebitmap_read(struct ebitmap *e, void *fp)
+{
+	struct ebitmap_node *n = NULL;
+	u32 mapunit, count, startbit, index;
+	u64 map;
+	__le32 buf[3];
+	int rc, i;
+
+	ebitmap_init(e);
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc < 0)
+		goto out;
+
+	mapunit = le32_to_cpu(buf[0]);
+	e->highbit = le32_to_cpu(buf[1]);
+	count = le32_to_cpu(buf[2]);
+
+	if (mapunit != BITS_PER_U64) {
+		printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
+		       "match my size %Zd (high bit was %d)\n",
+		       mapunit, BITS_PER_U64, e->highbit);
+		goto bad;
+	}
+
+	/* round up e->highbit */
+	e->highbit += EBITMAP_SIZE - 1;
+	e->highbit -= (e->highbit % EBITMAP_SIZE);
+
+	if (!e->highbit) {
+		e->node = NULL;
+		goto ok;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = next_entry(&startbit, fp, sizeof(u32));
+		if (rc < 0) {
+			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
+			goto bad;
+		}
+		startbit = le32_to_cpu(startbit);
+
+		if (startbit & (mapunit - 1)) {
+			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
+			       "not a multiple of the map unit size (%u)\n",
+			       startbit, mapunit);
+			goto bad;
+		}
+		if (startbit > e->highbit - mapunit) {
+			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
+			       "beyond the end of the bitmap (%u)\n",
+			       startbit, (e->highbit - mapunit));
+			goto bad;
+		}
+
+		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
+			struct ebitmap_node *tmp;
+			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+			if (!tmp) {
+				printk(KERN_ERR
+				       "SELinux: ebitmap: out of memory\n");
+				rc = -ENOMEM;
+				goto bad;
+			}
+			/* round down */
+			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
+			if (n)
+				n->next = tmp;
+			else
+				e->node = tmp;
+			n = tmp;
+		} else if (startbit <= n->startbit) {
+			printk(KERN_ERR "SELinux: ebitmap: start bit %d"
+			       " comes after start bit %d\n",
+			       startbit, n->startbit);
+			goto bad;
+		}
+
+		rc = next_entry(&map, fp, sizeof(u64));
+		if (rc < 0) {
+			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
+			goto bad;
+		}
+		map = le64_to_cpu(map);
+
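+		/*
+		 * Fold the 64-bit on-disk unit into native words: each
+		 * u64 fills two consecutive maps[] slots on a 32-bit
+		 * kernel and a single slot on a 64-bit kernel.
+		 */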
+		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
+		while (map) {
+			n->maps[index++] = map & (-1UL);
+			map = EBITMAP_SHIFT_UNIT_SIZE(map);
+		}
+	}
+ok:
+	rc = 0;
+out:
+	return rc;
+bad:
+	if (!rc)
+		rc = -EINVAL;
+	ebitmap_destroy(e);
+	goto out;
+}
+
+int ebitmap_write(struct ebitmap *e, void *fp)
+{
+	struct ebitmap_node *n;
+	u32 count;
+	__le32 buf[3];
+	u64 map;
+	int bit, last_bit, last_startbit, rc;
+
+	buf[0] = cpu_to_le32(BITS_PER_U64);
+
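+	/*
+	 * First pass: count how many 64-bit units contain at least one
+	 * set bit and compute the high bit that will be written out;
+	 * the second pass below emits one (startbit, map) pair per
+	 * such unit.
+	 */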
+	count = 0;
+	last_bit = 0;
+	last_startbit = -1;
+	ebitmap_for_each_positive_bit(e, n, bit) {
+		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+			count++;
+			last_startbit = rounddown(bit, BITS_PER_U64);
+		}
+		last_bit = roundup(bit + 1, BITS_PER_U64);
+	}
+	buf[1] = cpu_to_le32(last_bit);
+	buf[2] = cpu_to_le32(count);
+
+	rc = put_entry(buf, sizeof(u32), 3, fp);
+	if (rc)
+		return rc;
+
+	map = 0;
+	last_startbit = INT_MIN;
+	ebitmap_for_each_positive_bit(e, n, bit) {
+		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+			__le64 buf64[1];
+
+			/* this is the very first bit */
+			if (!map) {
+				last_startbit = rounddown(bit, BITS_PER_U64);
+				map = (u64)1 << (bit - last_startbit);
+				continue;
+			}
+
+			/* write the last node */
+			buf[0] = cpu_to_le32(last_startbit);
+			rc = put_entry(buf, sizeof(u32), 1, fp);
+			if (rc)
+				return rc;
+
+			buf64[0] = cpu_to_le64(map);
+			rc = put_entry(buf64, sizeof(u64), 1, fp);
+			if (rc)
+				return rc;
+
+			/* set up for the next node */
+			map = 0;
+			last_startbit = rounddown(bit, BITS_PER_U64);
+		}
+		map |= (u64)1 << (bit - last_startbit);
+	}
+	/* write the last node */
+	if (map) {
+		__le64 buf64[1];
+
+		/* write the last node */
+		buf[0] = cpu_to_le32(last_startbit);
+		rc = put_entry(buf, sizeof(u32), 1, fp);
+		if (rc)
+			return rc;
+
+		buf64[0] = cpu_to_le64(map);
+		rc = put_entry(buf64, sizeof(u64), 1, fp);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
new file mode 100644
index 0000000..9637b8c
--- /dev/null
+++ b/security/selinux/ss/ebitmap.h
@@ -0,0 +1,151 @@
+/*
+ * An extensible bitmap is a bitmap that supports an
+ * arbitrary number of bits.  Extensible bitmaps are
+ * used to represent sets of values, such as types,
+ * roles, categories, and classes.
+ *
+ * Each extensible bitmap is implemented as a linked
+ * list of bitmap nodes, where each bitmap node has
+ * an explicitly specified starting bit position within
+ * the total bitmap.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SS_EBITMAP_H_
+#define _SS_EBITMAP_H_
+
+#include <net/netlabel.h>
+
+#ifdef CONFIG_64BIT
+#define	EBITMAP_NODE_SIZE	64
+#else
+#define	EBITMAP_NODE_SIZE	32
+#endif
+
+#define EBITMAP_UNIT_NUMS	((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
+					/ sizeof(unsigned long))
+#define EBITMAP_UNIT_SIZE	BITS_PER_LONG
+#define EBITMAP_SIZE		(EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
+#define EBITMAP_BIT		1ULL
+#define EBITMAP_SHIFT_UNIT_SIZE(x)					\
+	(((x) >> EBITMAP_UNIT_SIZE / 2) >> EBITMAP_UNIT_SIZE / 2)
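+/*
+ * Note: the shift is split into two halves because shifting a 64-bit
+ * value by a full 64 bits (when EBITMAP_UNIT_SIZE == 64) would be
+ * undefined behavior in C; two shifts of EBITMAP_UNIT_SIZE / 2 are
+ * always well defined.
+ */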
+
+struct ebitmap_node {
+	struct ebitmap_node *next;
+	unsigned long maps[EBITMAP_UNIT_NUMS];
+	u32 startbit;
+};
+
+struct ebitmap {
+	struct ebitmap_node *node;	/* first node in the bitmap */
+	u32 highbit;	/* highest position in the total bitmap */
+};
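+
+/*
+ * Illustrative example (not from the upstream file): on a 64-bit
+ * build, EBITMAP_NODE_SIZE is 64 bytes, so EBITMAP_UNIT_NUMS is
+ * (64 - 8 - 4) / 8 == 6 and each node covers EBITMAP_SIZE == 384
+ * bits.  A bitmap with only bits 5 and 1000 set thus needs two
+ * nodes: one with startbit 0 (bit 5 in maps[0]) and one with
+ * startbit 768 (bit 1000 at index (1000 - 768) / 64 == 3, offset
+ * (1000 - 768) % 64 == 40), giving highbit == 1152.
+ */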
+
+#define ebitmap_length(e) ((e)->highbit)
+
+static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
+						  struct ebitmap_node **n)
+{
+	unsigned int ofs;
+
+	for (*n = e->node; *n; *n = (*n)->next) {
+		ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
+		if (ofs < EBITMAP_SIZE)
+			return (*n)->startbit + ofs;
+	}
+	return ebitmap_length(e);
+}
+
+static inline void ebitmap_init(struct ebitmap *e)
+{
+	memset(e, 0, sizeof(*e));
+}
+
+static inline unsigned int ebitmap_next_positive(struct ebitmap *e,
+						 struct ebitmap_node **n,
+						 unsigned int bit)
+{
+	unsigned int ofs;
+
+	ofs = find_next_bit((*n)->maps, EBITMAP_SIZE, bit - (*n)->startbit + 1);
+	if (ofs < EBITMAP_SIZE)
+		return ofs + (*n)->startbit;
+
+	for (*n = (*n)->next; *n; *n = (*n)->next) {
+		ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
+		if (ofs < EBITMAP_SIZE)
+			return ofs + (*n)->startbit;
+	}
+	return ebitmap_length(e);
+}
+
+#define EBITMAP_NODE_INDEX(node, bit)	\
+	(((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
+#define EBITMAP_NODE_OFFSET(node, bit)	\
+	(((bit) - (node)->startbit) % EBITMAP_UNIT_SIZE)
+
+static inline int ebitmap_node_get_bit(struct ebitmap_node *n,
+				       unsigned int bit)
+{
+	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+	BUG_ON(index >= EBITMAP_UNIT_NUMS);
+	if ((n->maps[index] & (EBITMAP_BIT << ofs)))
+		return 1;
+	return 0;
+}
+
+static inline void ebitmap_node_set_bit(struct ebitmap_node *n,
+					unsigned int bit)
+{
+	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+	BUG_ON(index >= EBITMAP_UNIT_NUMS);
+	n->maps[index] |= (EBITMAP_BIT << ofs);
+}
+
+static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
+					unsigned int bit)
+{
+	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+	BUG_ON(index >= EBITMAP_UNIT_NUMS);
+	n->maps[index] &= ~(EBITMAP_BIT << ofs);
+}
+
+#define ebitmap_for_each_positive_bit(e, n, bit)	\
+	for (bit = ebitmap_start_positive(e, &n);	\
+	     bit < ebitmap_length(e);			\
+	     bit = ebitmap_next_positive(e, &n, bit))
+
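+/*
+ * Illustrative usage (not from the upstream file): walking every set
+ * bit of an ebitmap looks like
+ *
+ *	struct ebitmap_node *node;
+ *	unsigned int bit;
+ *
+ *	ebitmap_for_each_positive_bit(&e, node, bit)
+ *		do_something(bit);
+ *
+ * where e is a struct ebitmap and do_something() is a placeholder.
+ */
+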
+int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
+int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
+int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
+int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
+void ebitmap_destroy(struct ebitmap *e);
+int ebitmap_read(struct ebitmap *e, void *fp);
+int ebitmap_write(struct ebitmap *e, void *fp);
+
+#ifdef CONFIG_NETLABEL
+int ebitmap_netlbl_export(struct ebitmap *ebmap,
+			  struct netlbl_lsm_catmap **catmap);
+int ebitmap_netlbl_import(struct ebitmap *ebmap,
+			  struct netlbl_lsm_catmap *catmap);
+#else
+static inline int ebitmap_netlbl_export(struct ebitmap *ebmap,
+					struct netlbl_lsm_catmap **catmap)
+{
+	return -ENOMEM;
+}
+static inline int ebitmap_netlbl_import(struct ebitmap *ebmap,
+					struct netlbl_lsm_catmap *catmap)
+{
+	return -ENOMEM;
+}
+#endif
+
+#endif	/* _SS_EBITMAP_H_ */
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
new file mode 100644
index 0000000..2cc4961
--- /dev/null
+++ b/security/selinux/ss/hashtab.c
@@ -0,0 +1,168 @@
+/*
+ * Implementation of the hash table type.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include "hashtab.h"
+
+struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
+			       int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
+			       u32 size)
+{
+	struct hashtab *p;
+	u32 i;
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (p == NULL)
+		return p;
+
+	p->size = size;
+	p->nel = 0;
+	p->hash_value = hash_value;
+	p->keycmp = keycmp;
+	p->htable = kmalloc(sizeof(*(p->htable)) * size, GFP_KERNEL);
+	if (p->htable == NULL) {
+		kfree(p);
+		return NULL;
+	}
+
+	for (i = 0; i < size; i++)
+		p->htable[i] = NULL;
+
+	return p;
+}
+
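+/*
+ * Note: each bucket chain is kept ordered by the table's keycmp
+ * function, so this insertion scan and hashtab_search() below can
+ * stop walking a chain as soon as keycmp() returns a value that is
+ * not greater than zero.
+ */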
+int hashtab_insert(struct hashtab *h, void *key, void *datum)
+{
+	u32 hvalue;
+	struct hashtab_node *prev, *cur, *newnode;
+
+	cond_resched();
+
+	if (!h || h->nel == HASHTAB_MAX_NODES)
+		return -EINVAL;
+
+	hvalue = h->hash_value(h, key);
+	prev = NULL;
+	cur = h->htable[hvalue];
+	while (cur && h->keycmp(h, key, cur->key) > 0) {
+		prev = cur;
+		cur = cur->next;
+	}
+
+	if (cur && (h->keycmp(h, key, cur->key) == 0))
+		return -EEXIST;
+
+	newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+	if (newnode == NULL)
+		return -ENOMEM;
+	newnode->key = key;
+	newnode->datum = datum;
+	if (prev) {
+		newnode->next = prev->next;
+		prev->next = newnode;
+	} else {
+		newnode->next = h->htable[hvalue];
+		h->htable[hvalue] = newnode;
+	}
+
+	h->nel++;
+	return 0;
+}
+
+void *hashtab_search(struct hashtab *h, const void *key)
+{
+	u32 hvalue;
+	struct hashtab_node *cur;
+
+	if (!h)
+		return NULL;
+
+	hvalue = h->hash_value(h, key);
+	cur = h->htable[hvalue];
+	while (cur && h->keycmp(h, key, cur->key) > 0)
+		cur = cur->next;
+
+	if (cur == NULL || (h->keycmp(h, key, cur->key) != 0))
+		return NULL;
+
+	return cur->datum;
+}
+
+void hashtab_destroy(struct hashtab *h)
+{
+	u32 i;
+	struct hashtab_node *cur, *temp;
+
+	if (!h)
+		return;
+
+	for (i = 0; i < h->size; i++) {
+		cur = h->htable[i];
+		while (cur) {
+			temp = cur;
+			cur = cur->next;
+			kfree(temp);
+		}
+		h->htable[i] = NULL;
+	}
+
+	kfree(h->htable);
+	h->htable = NULL;
+
+	kfree(h);
+}
+
+int hashtab_map(struct hashtab *h,
+		int (*apply)(void *k, void *d, void *args),
+		void *args)
+{
+	u32 i;
+	int ret;
+	struct hashtab_node *cur;
+
+	if (!h)
+		return 0;
+
+	for (i = 0; i < h->size; i++) {
+		cur = h->htable[i];
+		while (cur) {
+			ret = apply(cur->key, cur->datum, args);
+			if (ret)
+				return ret;
+			cur = cur->next;
+		}
+	}
+	return 0;
+}
+
+void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
+{
+	u32 i, chain_len, slots_used, max_chain_len;
+	struct hashtab_node *cur;
+
+	for (slots_used = max_chain_len = i = 0; i < h->size; i++) {
+		cur = h->htable[i];
+		if (cur) {
+			slots_used++;
+			chain_len = 0;
+			while (cur) {
+				chain_len++;
+				cur = cur->next;
+			}
+
+			if (chain_len > max_chain_len)
+				max_chain_len = chain_len;
+		}
+	}
+
+	info->slots_used = slots_used;
+	info->max_chain_len = max_chain_len;
+}
diff --git a/security/selinux/ss/hashtab.h b/security/selinux/ss/hashtab.h
new file mode 100644
index 0000000..953872c
--- /dev/null
+++ b/security/selinux/ss/hashtab.h
@@ -0,0 +1,87 @@
+/*
+ * A hash table (hashtab) maintains associations between
+ * key values and datum values.  The type of the key values
+ * and the type of the datum values is arbitrary.  The
+ * functions for hash computation and key comparison are
+ * provided by the creator of the table.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SS_HASHTAB_H_
+#define _SS_HASHTAB_H_
+
+#define HASHTAB_MAX_NODES	0xffffffff
+
+struct hashtab_node {
+	void *key;
+	void *datum;
+	struct hashtab_node *next;
+};
+
+struct hashtab {
+	struct hashtab_node **htable;	/* hash table */
+	u32 size;			/* number of slots in hash table */
+	u32 nel;			/* number of elements in hash table */
+	u32 (*hash_value)(struct hashtab *h, const void *key);
+					/* hash function */
+	int (*keycmp)(struct hashtab *h, const void *key1, const void *key2);
+					/* key comparison function */
+};
+
+struct hashtab_info {
+	u32 slots_used;
+	u32 max_chain_len;
+};
+
+/*
+ * Creates a new hash table with the specified characteristics.
+ *
+ * Returns NULL if insufficient space is available or
+ * the new hash table otherwise.
+ */
+struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
+			       int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
+			       u32 size);
+
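+/*
+ * Illustrative sketch (not from the upstream file): a table keyed by
+ * NUL-terminated strings might supply helpers such as
+ *
+ *	static u32 str_hash(struct hashtab *h, const void *key)
+ *	{
+ *		const unsigned char *p = key;
+ *		unsigned long hash = 0;
+ *
+ *		while (*p)
+ *			hash = partial_name_hash(*p++, hash);
+ *		return hash & (h->size - 1);
+ *	}
+ *
+ *	static int str_cmp(struct hashtab *h, const void *k1,
+ *			   const void *k2)
+ *	{
+ *		return strcmp(k1, k2);
+ *	}
+ *
+ * and then call hashtab_create(str_hash, str_cmp, 512).  The masking
+ * with (h->size - 1) assumes a power-of-two size, as the hash
+ * functions elsewhere in this tree use.
+ */
+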
+/*
+ * Inserts the specified (key, datum) pair into the specified hash table.
+ *
+ * Returns -ENOMEM on memory allocation error,
+ * -EEXIST if there is already an entry with the same key,
+ * -EINVAL for general errors or
+ * 0 otherwise.
+ */
+int hashtab_insert(struct hashtab *h, void *k, void *d);
+
+/*
+ * Searches for the entry with the specified key in the hash table.
+ *
+ * Returns NULL if no entry has the specified key or
+ * the datum of the entry otherwise.
+ */
+void *hashtab_search(struct hashtab *h, const void *k);
+
+/*
+ * Destroys the specified hash table.
+ */
+void hashtab_destroy(struct hashtab *h);
+
+/*
+ * Applies the specified apply function to (key,datum,args)
+ * for each entry in the specified hash table.
+ *
+ * The order in which the function is applied to the entries
+ * is dependent upon the internal structure of the hash table.
+ *
+ * If apply returns a non-zero status, then hashtab_map will cease
+ * iterating through the hash table and will propagate the error
+ * return to its caller.
+ */
+int hashtab_map(struct hashtab *h,
+		int (*apply)(void *k, void *d, void *args),
+		void *args);
+
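+/*
+ * Illustrative example (not from the upstream file): an apply
+ * function that counts entries could be written as
+ *
+ *	static int count_entry(void *k, void *d, void *args)
+ *	{
+ *		(*(u32 *)args)++;
+ *		return 0;
+ *	}
+ *
+ * and invoked with
+ *
+ *	u32 nel = 0;
+ *	hashtab_map(t, count_entry, &nel);
+ */
+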
+/* Fill info with some hash table statistics */
+void hashtab_stat(struct hashtab *h, struct hashtab_info *info);
+
+#endif	/* _SS_HASHTAB_H_ */
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
new file mode 100644
index 0000000..e108884
--- /dev/null
+++ b/security/selinux/ss/mls.c
@@ -0,0 +1,668 @@
+/*
+ * Implementation of the multi-level security (MLS) policy.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ *	Support for enhanced MLS infrastructure.
+ *
+ * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
+ */
+/*
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ *      Added support to import/export the MLS label from NetLabel
+ *
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <net/netlabel.h>
+#include "sidtab.h"
+#include "mls.h"
+#include "policydb.h"
+#include "services.h"
+
+/*
+ * Return the length in bytes for the MLS fields of the
+ * security context string representation of `context'.
+ */
+int mls_compute_context_len(struct context *context)
+{
+	int i, l, len, head, prev;
+	char *nm;
+	struct ebitmap *e;
+	struct ebitmap_node *node;
+
+	if (!policydb.mls_enabled)
+		return 0;
+
+	len = 1; /* for the beginning ":" */
+	for (l = 0; l < 2; l++) {
+		int index_sens = context->range.level[l].sens;
+		len += strlen(sym_name(&policydb, SYM_LEVELS, index_sens - 1));
+
+		/* categories */
+		head = -2;
+		prev = -2;
+		e = &context->range.level[l].cat;
+		ebitmap_for_each_positive_bit(e, node, i) {
+			if (i - prev > 1) {
+				/* one or more negative bits are skipped */
+				if (head != prev) {
+					nm = sym_name(&policydb, SYM_CATS, prev);
+					len += strlen(nm) + 1;
+				}
+				nm = sym_name(&policydb, SYM_CATS, i);
+				len += strlen(nm) + 1;
+				head = i;
+			}
+			prev = i;
+		}
+		if (prev != head) {
+			nm = sym_name(&policydb, SYM_CATS, prev);
+			len += strlen(nm) + 1;
+		}
+		if (l == 0) {
+			if (mls_level_eq(&context->range.level[0],
+					 &context->range.level[1]))
+				break;
+			else
+				len++;
+		}
+	}
+
+	return len;
+}
+
+/*
+ * Write the security context string representation of
+ * the MLS fields of `context' into the string `*scontext'.
+ * Update `*scontext' to point to the end of the MLS fields.
+ */
+void mls_sid_to_context(struct context *context,
+			char **scontext)
+{
+	char *scontextp, *nm;
+	int i, l, head, prev;
+	struct ebitmap *e;
+	struct ebitmap_node *node;
+
+	if (!policydb.mls_enabled)
+		return;
+
+	scontextp = *scontext;
+
+	*scontextp = ':';
+	scontextp++;
+
+	for (l = 0; l < 2; l++) {
+		strcpy(scontextp, sym_name(&policydb, SYM_LEVELS,
+					   context->range.level[l].sens - 1));
+		scontextp += strlen(scontextp);
+
+		/* categories */
+		head = -2;
+		prev = -2;
+		e = &context->range.level[l].cat;
+		ebitmap_for_each_positive_bit(e, node, i) {
+			if (i - prev > 1) {
+				/* one or more negative bits are skipped */
+				if (prev != head) {
+					if (prev - head > 1)
+						*scontextp++ = '.';
+					else
+						*scontextp++ = ',';
+					nm = sym_name(&policydb, SYM_CATS, prev);
+					strcpy(scontextp, nm);
+					scontextp += strlen(nm);
+				}
+				if (prev < 0)
+					*scontextp++ = ':';
+				else
+					*scontextp++ = ',';
+				nm = sym_name(&policydb, SYM_CATS, i);
+				strcpy(scontextp, nm);
+				scontextp += strlen(nm);
+				head = i;
+			}
+			prev = i;
+		}
+
+		if (prev != head) {
+			if (prev - head > 1)
+				*scontextp++ = '.';
+			else
+				*scontextp++ = ',';
+			nm = sym_name(&policydb, SYM_CATS, prev);
+			strcpy(scontextp, nm);
+			scontextp += strlen(nm);
+		}
+
+		if (l == 0) {
+			if (mls_level_eq(&context->range.level[0],
+					 &context->range.level[1]))
+				break;
+			else
+				*scontextp++ = '-';
+		}
+	}
+
+	*scontext = scontextp;
+}
+
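+/*
+ * Illustrative example (not from the upstream file): for a context
+ * whose low level is sensitivity s0 with categories c0, c1, c2 and
+ * c5, and whose high level is s1 with the same categories, the code
+ * above emits ":s0:c0.c2,c5-s1:c0.c2,c5" -- runs of three or more
+ * adjacent categories are compressed with '.', while isolated or
+ * paired categories are separated with ','.
+ */
+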
+int mls_level_isvalid(struct policydb *p, struct mls_level *l)
+{
+	struct level_datum *levdatum;
+
+	if (!l->sens || l->sens > p->p_levels.nprim)
+		return 0;
+	levdatum = hashtab_search(p->p_levels.table,
+				  sym_name(p, SYM_LEVELS, l->sens - 1));
+	if (!levdatum)
+		return 0;
+
+	/*
+	 * Return 1 iff all the bits set in l->cat are also set in
+	 * levdatum->level->cat and no bit in l->cat is larger than
+	 * p->p_cats.nprim.
+	 */
+	return ebitmap_contains(&levdatum->level->cat, &l->cat,
+				p->p_cats.nprim);
+}
+
+int mls_range_isvalid(struct policydb *p, struct mls_range *r)
+{
+	return (mls_level_isvalid(p, &r->level[0]) &&
+		mls_level_isvalid(p, &r->level[1]) &&
+		mls_level_dom(&r->level[1], &r->level[0]));
+}
+
+/*
+ * Return 1 if the MLS fields in the security context
+ * structure `c' are valid.  Return 0 otherwise.
+ */
+int mls_context_isvalid(struct policydb *p, struct context *c)
+{
+	struct user_datum *usrdatum;
+
+	if (!p->mls_enabled)
+		return 1;
+
+	if (!mls_range_isvalid(p, &c->range))
+		return 0;
+
+	if (c->role == OBJECT_R_VAL)
+		return 1;
+
+	/*
+	 * User must be authorized for the MLS range.
+	 */
+	if (!c->user || c->user > p->p_users.nprim)
+		return 0;
+	usrdatum = p->user_val_to_struct[c->user - 1];
+	if (!mls_range_contains(usrdatum->range, c->range))
+		return 0; /* user may not be associated with range */
+
+	return 1;
+}
+
+/*
+ * Set the MLS fields in the security context structure
+ * `context' based on the string representation in
+ * the string `*scontext'.  Update `*scontext' to
+ * point to the end of the string representation of
+ * the MLS fields.
+ *
+ * This function modifies the string in place, inserting
+ * NULL characters to terminate the MLS fields.
+ *
+ * If a def_sid is provided and no MLS field is present,
+ * copy the MLS field of the associated default context.
+ * Used for systems upgraded to MLS, where objects may lack
+ * MLS fields.
+ *
+ * Policy read-lock must be held for sidtab lookup.
+ *
+ */
+int mls_context_to_sid(struct policydb *pol,
+		       char oldc,
+		       char **scontext,
+		       struct context *context,
+		       struct sidtab *s,
+		       u32 def_sid)
+{
+	char delim;
+	char *scontextp, *p, *rngptr;
+	struct level_datum *levdatum;
+	struct cat_datum *catdatum, *rngdatum;
+	int l, rc = -EINVAL;
+
+	if (!pol->mls_enabled) {
+		if (def_sid != SECSID_NULL && oldc)
+			*scontext += strlen(*scontext) + 1;
+		return 0;
+	}
+
+	/*
+	 * No MLS component to the security context, try and map to
+	 * default if provided.
+	 */
+	if (!oldc) {
+		struct context *defcon;
+
+		if (def_sid == SECSID_NULL)
+			goto out;
+
+		defcon = sidtab_search(s, def_sid);
+		if (!defcon)
+			goto out;
+
+		rc = mls_context_cpy(context, defcon);
+		goto out;
+	}
+
+	/* Extract low sensitivity. */
+	scontextp = p = *scontext;
+	while (*p && *p != ':' && *p != '-')
+		p++;
+
+	delim = *p;
+	if (delim != '\0')
+		*p++ = '\0';
+
+	for (l = 0; l < 2; l++) {
+		levdatum = hashtab_search(pol->p_levels.table, scontextp);
+		if (!levdatum) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		context->range.level[l].sens = levdatum->level->sens;
+
+		if (delim == ':') {
+			/* Extract category set. */
+			while (1) {
+				scontextp = p;
+				while (*p && *p != ',' && *p != '-')
+					p++;
+				delim = *p;
+				if (delim != '\0')
+					*p++ = '\0';
+
+				/* Separate into a range if one exists */
+				rngptr = strchr(scontextp, '.');
+				if (rngptr != NULL) {
+					/* Remove '.' */
+					*rngptr++ = '\0';
+				}
+
+				catdatum = hashtab_search(pol->p_cats.table,
+							  scontextp);
+				if (!catdatum) {
+					rc = -EINVAL;
+					goto out;
+				}
+
+				rc = ebitmap_set_bit(&context->range.level[l].cat,
+						     catdatum->value - 1, 1);
+				if (rc)
+					goto out;
+
+				/* If range, set all categories in range */
+				if (rngptr) {
+					int i;
+
+					rngdatum = hashtab_search(pol->p_cats.table, rngptr);
+					if (!rngdatum) {
+						rc = -EINVAL;
+						goto out;
+					}
+
+					if (catdatum->value >= rngdatum->value) {
+						rc = -EINVAL;
+						goto out;
+					}
+
+					for (i = catdatum->value; i < rngdatum->value; i++) {
+						rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1);
+						if (rc)
+							goto out;
+					}
+				}
+
+				if (delim != ',')
+					break;
+			}
+		}
+		if (delim == '-') {
+			/* Extract high sensitivity. */
+			scontextp = p;
+			while (*p && *p != ':')
+				p++;
+
+			delim = *p;
+			if (delim != '\0')
+				*p++ = '\0';
+		} else
+			break;
+	}
+
+	if (l == 0) {
+		context->range.level[1].sens = context->range.level[0].sens;
+		rc = ebitmap_cpy(&context->range.level[1].cat,
+				 &context->range.level[0].cat);
+		if (rc)
+			goto out;
+	}
+	*scontext = ++p;
+	rc = 0;
+out:
+	return rc;
+}
+
+/*
+ * Set the MLS fields in the security context structure
+ * `context' based on the string representation in
+ * the string `str'.  This function will allocate temporary memory with the
+ * given constraints of gfp_mask.
+ */
+int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
+{
+	char *tmpstr, *freestr;
+	int rc;
+
+	if (!policydb.mls_enabled)
+		return -EINVAL;
+
+	/* we need freestr because mls_context_to_sid will change
+	   the value of tmpstr */
+	tmpstr = freestr = kstrdup(str, gfp_mask);
+	if (!tmpstr) {
+		rc = -ENOMEM;
+	} else {
+		rc = mls_context_to_sid(&policydb, ':', &tmpstr, context,
+					NULL, SECSID_NULL);
+		kfree(freestr);
+	}
+
+	return rc;
+}
+
+/*
+ * Copies the MLS range `range' into `context'.
+ */
+int mls_range_set(struct context *context,
+				struct mls_range *range)
+{
+	int l, rc = 0;
+
+	/* Copy the MLS range into the context */
+	for (l = 0; l < 2; l++) {
+		context->range.level[l].sens = range->level[l].sens;
+		rc = ebitmap_cpy(&context->range.level[l].cat,
+				 &range->level[l].cat);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
+			 struct context *usercon)
+{
+	if (policydb.mls_enabled) {
+		struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
+		struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
+		struct mls_level *user_low = &(user->range.level[0]);
+		struct mls_level *user_clr = &(user->range.level[1]);
+		struct mls_level *user_def = &(user->dfltlevel);
+		struct mls_level *usercon_sen = &(usercon->range.level[0]);
+		struct mls_level *usercon_clr = &(usercon->range.level[1]);
+
+		/* Honor the user's default level if we can */
+		if (mls_level_between(user_def, fromcon_sen, fromcon_clr))
+			*usercon_sen = *user_def;
+		else if (mls_level_between(fromcon_sen, user_def, user_clr))
+			*usercon_sen = *fromcon_sen;
+		else if (mls_level_between(fromcon_clr, user_low, user_def))
+			*usercon_sen = *user_low;
+		else
+			return -EINVAL;
+
+		/* Lower the clearance of available contexts
+		   if the clearance of "fromcon" is lower than
+		   that of the user's default clearance (but
+		   only if the "fromcon" clearance dominates
+		   the user's computed sensitivity level) */
+		if (mls_level_dom(user_clr, fromcon_clr))
+			*usercon_clr = *fromcon_clr;
+		else if (mls_level_dom(fromcon_clr, user_clr))
+			*usercon_clr = *user_clr;
+		else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Convert the MLS fields in the security context
+ * structure `c' from the values specified in the
+ * policy `oldp' to the values specified in the policy `newp'.
+ */
+int mls_convert_context(struct policydb *oldp,
+			struct policydb *newp,
+			struct context *c)
+{
+	struct level_datum *levdatum;
+	struct cat_datum *catdatum;
+	struct ebitmap bitmap;
+	struct ebitmap_node *node;
+	int l, i;
+
+	if (!policydb.mls_enabled)
+		return 0;
+
+	for (l = 0; l < 2; l++) {
+		levdatum = hashtab_search(newp->p_levels.table,
+					  sym_name(oldp, SYM_LEVELS,
+						   c->range.level[l].sens - 1));
+
+		if (!levdatum)
+			return -EINVAL;
+		c->range.level[l].sens = levdatum->level->sens;
+
+		ebitmap_init(&bitmap);
+		ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) {
+			int rc;
+
+			catdatum = hashtab_search(newp->p_cats.table,
+						  sym_name(oldp, SYM_CATS, i));
+			if (!catdatum)
+				return -EINVAL;
+			rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
+			if (rc)
+				return rc;
+
+			cond_resched();
+		}
+		ebitmap_destroy(&c->range.level[l].cat);
+		c->range.level[l].cat = bitmap;
+	}
+
+	return 0;
+}
+
+int mls_compute_sid(struct context *scontext,
+		    struct context *tcontext,
+		    u16 tclass,
+		    u32 specified,
+		    struct context *newcontext,
+		    bool sock)
+{
+	struct range_trans rtr;
+	struct mls_range *r;
+	struct class_datum *cladatum;
+	int default_range = 0;
+
+	if (!policydb.mls_enabled)
+		return 0;
+
+	switch (specified) {
+	case AVTAB_TRANSITION:
+		/* Look for a range transition rule. */
+		rtr.source_type = scontext->type;
+		rtr.target_type = tcontext->type;
+		rtr.target_class = tclass;
+		r = hashtab_search(policydb.range_tr, &rtr);
+		if (r)
+			return mls_range_set(newcontext, r);
+
+		if (tclass && tclass <= policydb.p_classes.nprim) {
+			cladatum = policydb.class_val_to_struct[tclass - 1];
+			if (cladatum)
+				default_range = cladatum->default_range;
+		}
+
+		switch (default_range) {
+		case DEFAULT_SOURCE_LOW:
+			return mls_context_cpy_low(newcontext, scontext);
+		case DEFAULT_SOURCE_HIGH:
+			return mls_context_cpy_high(newcontext, scontext);
+		case DEFAULT_SOURCE_LOW_HIGH:
+			return mls_context_cpy(newcontext, scontext);
+		case DEFAULT_TARGET_LOW:
+			return mls_context_cpy_low(newcontext, tcontext);
+		case DEFAULT_TARGET_HIGH:
+			return mls_context_cpy_high(newcontext, tcontext);
+		case DEFAULT_TARGET_LOW_HIGH:
+			return mls_context_cpy(newcontext, tcontext);
+		}
+
+		/* Fallthrough */
+	case AVTAB_CHANGE:
+		if ((tclass == policydb.process_class) || (sock == true))
+			/* Use the process MLS attributes. */
+			return mls_context_cpy(newcontext, scontext);
+		else
+			/* Use the process effective MLS attributes. */
+			return mls_context_cpy_low(newcontext, scontext);
+	case AVTAB_MEMBER:
+		/* Use the process effective MLS attributes. */
+		return mls_context_cpy_low(newcontext, scontext);
+
+	/* fall through */
+	}
+	return -EINVAL;
+}
+
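+/*
+ * Illustrative example (not from the upstream file, and assuming the
+ * mls_context_cpy_low() semantics from context.h): on an
+ * AVTAB_TRANSITION with no matching range_transition rule and no
+ * default_range on the class, control falls through to AVTAB_CHANGE;
+ * for a non-process, non-socket class the new context then receives
+ * the source's low level as both its low and high levels, so a
+ * process at s0-s3:c0.c127 creating a file typically yields a file
+ * labeled s0.
+ */
+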
+#ifdef CONFIG_NETLABEL
+/**
+ * mls_export_netlbl_lvl - Export the MLS sensitivity levels to NetLabel
+ * @context: the security context
+ * @secattr: the NetLabel security attributes
+ *
+ * Description:
+ * Given the security context copy the low MLS sensitivity level into the
+ * NetLabel MLS sensitivity level field.
+ *
+ */
+void mls_export_netlbl_lvl(struct context *context,
+			   struct netlbl_lsm_secattr *secattr)
+{
+	if (!policydb.mls_enabled)
+		return;
+
+	secattr->attr.mls.lvl = context->range.level[0].sens - 1;
+	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
+}
+
+/**
+ * mls_import_netlbl_lvl - Import the NetLabel MLS sensitivity levels
+ * @context: the security context
+ * @secattr: the NetLabel security attributes
+ *
+ * Description:
+ * Given the security context and the NetLabel security attributes, copy the
+ * NetLabel MLS sensitivity level into the context.
+ *
+ */
+void mls_import_netlbl_lvl(struct context *context,
+			   struct netlbl_lsm_secattr *secattr)
+{
+	if (!policydb.mls_enabled)
+		return;
+
+	context->range.level[0].sens = secattr->attr.mls.lvl + 1;
+	context->range.level[1].sens = context->range.level[0].sens;
+}
+
+/**
+ * mls_export_netlbl_cat - Export the MLS categories to NetLabel
+ * @context: the security context
+ * @secattr: the NetLabel security attributes
+ *
+ * Description:
+ * Given the security context copy the low MLS categories into the NetLabel
+ * MLS category field.  Returns zero on success, negative values on failure.
+ *
+ */
+int mls_export_netlbl_cat(struct context *context,
+			  struct netlbl_lsm_secattr *secattr)
+{
+	int rc;
+
+	if (!policydb.mls_enabled)
+		return 0;
+
+	rc = ebitmap_netlbl_export(&context->range.level[0].cat,
+				   &secattr->attr.mls.cat);
+	if (rc == 0 && secattr->attr.mls.cat != NULL)
+		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+
+	return rc;
+}
+
+/**
+ * mls_import_netlbl_cat - Import the MLS categories from NetLabel
+ * @context: the security context
+ * @secattr: the NetLabel security attributes
+ *
+ * Description:
+ * Copy the NetLabel security attributes into the SELinux context; since the
+ * NetLabel security attribute only contains a single MLS category use it for
+ * both the low and high categories of the context.  Returns zero on success,
+ * negative values on failure.
+ *
+ */
+int mls_import_netlbl_cat(struct context *context,
+			  struct netlbl_lsm_secattr *secattr)
+{
+	int rc;
+
+	if (!policydb.mls_enabled)
+		return 0;
+
+	rc = ebitmap_netlbl_import(&context->range.level[0].cat,
+				   secattr->attr.mls.cat);
+	if (rc)
+		goto import_netlbl_cat_failure;
+	memcpy(&context->range.level[1].cat, &context->range.level[0].cat,
+	       sizeof(context->range.level[0].cat));
+
+	return 0;
+
+import_netlbl_cat_failure:
+	ebitmap_destroy(&context->range.level[0].cat);
+	return rc;
+}
+#endif /* CONFIG_NETLABEL */
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
new file mode 100644
index 0000000..e4369e3
--- /dev/null
+++ b/security/selinux/ss/mls.h
@@ -0,0 +1,91 @@
+/*
+ * Multi-level security (MLS) policy operations.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ *	Support for enhanced MLS infrastructure.
+ *
+ * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
+ */
+/*
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ *	Added support to import/export the MLS label from NetLabel
+ *
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ */
+
+#ifndef _SS_MLS_H_
+#define _SS_MLS_H_
+
+#include "context.h"
+#include "policydb.h"
+
+int mls_compute_context_len(struct context *context);
+void mls_sid_to_context(struct context *context, char **scontext);
+int mls_context_isvalid(struct policydb *p, struct context *c);
+int mls_range_isvalid(struct policydb *p, struct mls_range *r);
+int mls_level_isvalid(struct policydb *p, struct mls_level *l);
+
+int mls_context_to_sid(struct policydb *p,
+		       char oldc,
+		       char **scontext,
+		       struct context *context,
+		       struct sidtab *s,
+		       u32 def_sid);
+
+int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
+
+int mls_range_set(struct context *context, struct mls_range *range);
+
+int mls_convert_context(struct policydb *oldp,
+			struct policydb *newp,
+			struct context *context);
+
+int mls_compute_sid(struct context *scontext,
+		    struct context *tcontext,
+		    u16 tclass,
+		    u32 specified,
+		    struct context *newcontext,
+		    bool sock);
+
+int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
+			 struct context *usercon);
+
+#ifdef CONFIG_NETLABEL
+void mls_export_netlbl_lvl(struct context *context,
+			   struct netlbl_lsm_secattr *secattr);
+void mls_import_netlbl_lvl(struct context *context,
+			   struct netlbl_lsm_secattr *secattr);
+int mls_export_netlbl_cat(struct context *context,
+			  struct netlbl_lsm_secattr *secattr);
+int mls_import_netlbl_cat(struct context *context,
+			  struct netlbl_lsm_secattr *secattr);
+#else
+static inline void mls_export_netlbl_lvl(struct context *context,
+					 struct netlbl_lsm_secattr *secattr)
+{
+	return;
+}
+static inline void mls_import_netlbl_lvl(struct context *context,
+					 struct netlbl_lsm_secattr *secattr)
+{
+	return;
+}
+static inline int mls_export_netlbl_cat(struct context *context,
+					struct netlbl_lsm_secattr *secattr)
+{
+	return -ENOMEM;
+}
+static inline int mls_import_netlbl_cat(struct context *context,
+					struct netlbl_lsm_secattr *secattr)
+{
+	return -ENOMEM;
+}
+#endif
+
+#endif	/* _SS_MLS_H_ */
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
new file mode 100644
index 0000000..e936487
--- /dev/null
+++ b/security/selinux/ss/mls_types.h
@@ -0,0 +1,51 @@
+/*
+ * Type definitions for the multi-level security (MLS) policy.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ *	Support for enhanced MLS infrastructure.
+ *
+ * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+ */
+
+#ifndef _SS_MLS_TYPES_H_
+#define _SS_MLS_TYPES_H_
+
+#include "security.h"
+#include "ebitmap.h"
+
+struct mls_level {
+	u32 sens;		/* sensitivity */
+	struct ebitmap cat;	/* category set */
+};
+
+struct mls_range {
+	struct mls_level level[2]; /* low == level[0], high == level[1] */
+};
+
+static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
+{
+	return ((l1->sens == l2->sens) &&
+		ebitmap_cmp(&l1->cat, &l2->cat));
+}
+
+static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
+{
+	return ((l1->sens >= l2->sens) &&
+		ebitmap_contains(&l1->cat, &l2->cat, 0));
+}
+
+#define mls_level_incomp(l1, l2) \
+(!mls_level_dom((l1), (l2)) && !mls_level_dom((l2), (l1)))
+
+#define mls_level_between(l1, l2, l3) \
+(mls_level_dom((l1), (l2)) && mls_level_dom((l3), (l1)))
+
+#define mls_range_contains(r1, r2) \
+(mls_level_dom(&(r2).level[0], &(r1).level[0]) && \
+ mls_level_dom(&(r1).level[1], &(r2).level[1]))
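+
+/*
+ * Illustrative example (not from the upstream file): writing levels
+ * as sensitivity:categories, s1:{c0,c1} dominates s0:{c0} (its
+ * sensitivity is higher and its category set is a superset), so by
+ * mls_range_contains() the range s0:{c0} - s1:{c0,c1} contains
+ * s0:{c0} - s1:{c0} but not s0:{c2} - s1:{c2}.
+ */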
+
+#endif	/* _SS_MLS_TYPES_H_ */
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
new file mode 100644
index 0000000..992a315
--- /dev/null
+++ b/security/selinux/ss/policydb.c
@@ -0,0 +1,3465 @@
+/*
+ * Implementation of the policy database.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ *	Support for enhanced MLS infrastructure.
+ *
+ * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ *	Added conditional policy language extensions
+ *
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ *      Added support for the policy capability bitmap
+ *
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/audit.h>
+#include <linux/flex_array.h>
+#include "security.h"
+
+#include "policydb.h"
+#include "conditional.h"
+#include "mls.h"
+#include "services.h"
+
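+/*
+ * Hash table statistics are compiled out by default; renaming the
+ * macro below to DEBUG_HASHES (dropping the leading underscore)
+ * enables the hash_eval() reporting used by policydb_index().
+ */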
+#define _DEBUG_HASHES
+
+#ifdef DEBUG_HASHES
+static const char *symtab_name[SYM_NUM] = {
+	"common prefixes",
+	"classes",
+	"roles",
+	"types",
+	"users",
+	"bools",
+	"levels",
+	"categories",
+};
+#endif
+
+static unsigned int symtab_sizes[SYM_NUM] = {
+	2,
+	32,
+	16,
+	512,
+	128,
+	16,
+	16,
+	16,
+};
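+
+/*
+ * Note: initial bucket counts for the eight symbol tables, in the
+ * same order as the symtab_name[] strings above.  They are kept as
+ * powers of two because the hash functions in this directory mask
+ * with (h->size - 1) rather than taking a modulus.
+ */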
+
+struct policydb_compat_info {
+	int version;
+	int sym_num;
+	int ocon_num;
+};
+
+/* These need to be updated if SYM_NUM or OCON_NUM changes */
+static struct policydb_compat_info policydb_compat[] = {
+	{
+		.version	= POLICYDB_VERSION_BASE,
+		.sym_num	= SYM_NUM - 3,
+		.ocon_num	= OCON_NUM - 1,
+	},
+	{
+		.version	= POLICYDB_VERSION_BOOL,
+		.sym_num	= SYM_NUM - 2,
+		.ocon_num	= OCON_NUM - 1,
+	},
+	{
+		.version	= POLICYDB_VERSION_IPV6,
+		.sym_num	= SYM_NUM - 2,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_NLCLASS,
+		.sym_num	= SYM_NUM - 2,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_MLS,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_AVTAB,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_RANGETRANS,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_POLCAP,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_PERMISSIVE,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_BOUNDARY,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_FILENAME_TRANS,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_ROLETRANS,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_DEFAULT_TYPE,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_CONSTRAINT_NAMES,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_XPERMS_IOCTL,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+};
+
+static struct policydb_compat_info *policydb_lookup_compat(int version)
+{
+	int i;
+	struct policydb_compat_info *info = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(policydb_compat); i++) {
+		if (policydb_compat[i].version == version) {
+			info = &policydb_compat[i];
+			break;
+		}
+	}
+	return info;
+}
+
+/*
+ * Initialize the role table.
+ */
+static int roles_init(struct policydb *p)
+{
+	char *key = NULL;
+	int rc;
+	struct role_datum *role;
+
+	rc = -ENOMEM;
+	role = kzalloc(sizeof(*role), GFP_KERNEL);
+	if (!role)
+		goto out;
+
+	rc = -EINVAL;
+	role->value = ++p->p_roles.nprim;
+	if (role->value != OBJECT_R_VAL)
+		goto out;
+
+	rc = -ENOMEM;
+	key = kstrdup(OBJECT_R, GFP_KERNEL);
+	if (!key)
+		goto out;
+
+	rc = hashtab_insert(p->p_roles.table, key, role);
+	if (rc)
+		goto out;
+
+	return 0;
+out:
+	kfree(key);
+	kfree(role);
+	return rc;
+}
+
+static u32 filenametr_hash(struct hashtab *h, const void *k)
+{
+	const struct filename_trans *ft = k;
+	unsigned long hash;
+	unsigned int byte_num;
+	unsigned char focus;
+
+	hash = ft->stype ^ ft->ttype ^ ft->tclass;
+
+	byte_num = 0;
+	while ((focus = ft->name[byte_num++]))
+		hash = partial_name_hash(focus, hash);
+	return hash & (h->size - 1);
+}
+
+static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2)
+{
+	const struct filename_trans *ft1 = k1;
+	const struct filename_trans *ft2 = k2;
+	int v;
+
+	v = ft1->stype - ft2->stype;
+	if (v)
+		return v;
+
+	v = ft1->ttype - ft2->ttype;
+	if (v)
+		return v;
+
+	v = ft1->tclass - ft2->tclass;
+	if (v)
+		return v;
+
+	return strcmp(ft1->name, ft2->name);
+}
+
+static u32 rangetr_hash(struct hashtab *h, const void *k)
+{
+	const struct range_trans *key = k;
+	return (key->source_type + (key->target_type << 3) +
+		(key->target_class << 5)) & (h->size - 1);
+}
+
+static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
+{
+	const struct range_trans *key1 = k1, *key2 = k2;
+	int v;
+
+	v = key1->source_type - key2->source_type;
+	if (v)
+		return v;
+
+	v = key1->target_type - key2->target_type;
+	if (v)
+		return v;
+
+	v = key1->target_class - key2->target_class;
+
+	return v;
+}
+
+/*
+ * Initialize a policy database structure.
+ */
+static int policydb_init(struct policydb *p)
+{
+	int i, rc;
+
+	memset(p, 0, sizeof(*p));
+
+	for (i = 0; i < SYM_NUM; i++) {
+		rc = symtab_init(&p->symtab[i], symtab_sizes[i]);
+		if (rc)
+			goto out;
+	}
+
+	rc = avtab_init(&p->te_avtab);
+	if (rc)
+		goto out;
+
+	rc = roles_init(p);
+	if (rc)
+		goto out;
+
+	rc = cond_policydb_init(p);
+	if (rc)
+		goto out;
+
+	p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp, (1 << 10));
+	if (!p->filename_trans) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
+	if (!p->range_tr) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	ebitmap_init(&p->filename_trans_ttypes);
+	ebitmap_init(&p->policycaps);
+	ebitmap_init(&p->permissive_map);
+
+	return 0;
+out:
+	hashtab_destroy(p->filename_trans);
+	hashtab_destroy(p->range_tr);
+	for (i = 0; i < SYM_NUM; i++)
+		hashtab_destroy(p->symtab[i].table);
+	return rc;
+}
+
+/*
+ * The following *_index functions are used to
+ * define the val_to_name and val_to_struct arrays
+ * in a policy database structure.  The val_to_name
+ * arrays are used when converting security context
+ * structures into string representations.  The
+ * val_to_struct arrays are used when the attributes
+ * of a class, role, or user are needed.
+ */
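+
+/*
+ * Illustrative example (not from the upstream file): once indexing
+ * has run, sym_name(p, SYM_CATS, value - 1) yields the name string
+ * for a category value and p->class_val_to_struct[tclass - 1] yields
+ * the class_datum for a class value, which is what the lookups in
+ * mls.c and services.c rely on.
+ */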
+
+static int common_index(void *key, void *datum, void *datap)
+{
+	struct policydb *p;
+	struct common_datum *comdatum;
+	struct flex_array *fa;
+
+	comdatum = datum;
+	p = datap;
+	if (!comdatum->value || comdatum->value > p->p_commons.nprim)
+		return -EINVAL;
+
+	fa = p->sym_val_to_name[SYM_COMMONS];
+	if (flex_array_put_ptr(fa, comdatum->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
+	return 0;
+}
+
+static int class_index(void *key, void *datum, void *datap)
+{
+	struct policydb *p;
+	struct class_datum *cladatum;
+	struct flex_array *fa;
+
+	cladatum = datum;
+	p = datap;
+	if (!cladatum->value || cladatum->value > p->p_classes.nprim)
+		return -EINVAL;
+	fa = p->sym_val_to_name[SYM_CLASSES];
+	if (flex_array_put_ptr(fa, cladatum->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
+	p->class_val_to_struct[cladatum->value - 1] = cladatum;
+	return 0;
+}
+
+static int role_index(void *key, void *datum, void *datap)
+{
+	struct policydb *p;
+	struct role_datum *role;
+	struct flex_array *fa;
+
+	role = datum;
+	p = datap;
+	if (!role->value
+	    || role->value > p->p_roles.nprim
+	    || role->bounds > p->p_roles.nprim)
+		return -EINVAL;
+
+	fa = p->sym_val_to_name[SYM_ROLES];
+	if (flex_array_put_ptr(fa, role->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
+	p->role_val_to_struct[role->value - 1] = role;
+	return 0;
+}
+
+static int type_index(void *key, void *datum, void *datap)
+{
+	struct policydb *p;
+	struct type_datum *typdatum;
+	struct flex_array *fa;
+
+	typdatum = datum;
+	p = datap;
+
+	if (typdatum->primary) {
+		if (!typdatum->value
+		    || typdatum->value > p->p_types.nprim
+		    || typdatum->bounds > p->p_types.nprim)
+			return -EINVAL;
+		fa = p->sym_val_to_name[SYM_TYPES];
+		if (flex_array_put_ptr(fa, typdatum->value - 1, key,
+				       GFP_KERNEL | __GFP_ZERO))
+			BUG();
+
+		fa = p->type_val_to_struct_array;
+		if (flex_array_put_ptr(fa, typdatum->value - 1, typdatum,
+				       GFP_KERNEL | __GFP_ZERO))
+			BUG();
+	}
+
+	return 0;
+}
+
+static int user_index(void *key, void *datum, void *datap)
+{
+	struct policydb *p;
+	struct user_datum *usrdatum;
+	struct flex_array *fa;
+
+	usrdatum = datum;
+	p = datap;
+	if (!usrdatum->value
+	    || usrdatum->value > p->p_users.nprim
+	    || usrdatum->bounds > p->p_users.nprim)
+		return -EINVAL;
+
+	fa = p->sym_val_to_name[SYM_USERS];
+	if (flex_array_put_ptr(fa, usrdatum->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
+	p->user_val_to_struct[usrdatum->value - 1] = usrdatum;
+	return 0;
+}
+
+static int sens_index(void *key, void *datum, void *datap)
+{
+	struct policydb *p;
+	struct level_datum *levdatum;
+	struct flex_array *fa;
+
+	levdatum = datum;
+	p = datap;
+
+	if (!levdatum->isalias) {
+		if (!levdatum->level->sens ||
+		    levdatum->level->sens > p->p_levels.nprim)
+			return -EINVAL;
+		fa = p->sym_val_to_name[SYM_LEVELS];
+		if (flex_array_put_ptr(fa, levdatum->level->sens - 1, key,
+				       GFP_KERNEL | __GFP_ZERO))
+			BUG();
+	}
+
+	return 0;
+}
+
+static int cat_index(void *key, void *datum, void *datap)
+{
+	struct policydb *p;
+	struct cat_datum *catdatum;
+	struct flex_array *fa;
+
+	catdatum = datum;
+	p = datap;
+
+	if (!catdatum->isalias) {
+		if (!catdatum->value || catdatum->value > p->p_cats.nprim)
+			return -EINVAL;
+		fa = p->sym_val_to_name[SYM_CATS];
+		if (flex_array_put_ptr(fa, catdatum->value - 1, key,
+				       GFP_KERNEL | __GFP_ZERO))
+			BUG();
+	}
+
+	return 0;
+}
+
+static int (*index_f[SYM_NUM]) (void *key, void *datum, void *datap) =
+{
+	common_index,
+	class_index,
+	role_index,
+	type_index,
+	user_index,
+	cond_index_bool,
+	sens_index,
+	cat_index,
+};
+
+#ifdef DEBUG_HASHES
+static void hash_eval(struct hashtab *h, const char *hash_name)
+{
+	struct hashtab_info info;
+
+	hashtab_stat(h, &info);
+	printk(KERN_DEBUG "SELinux: %s:  %d entries and %d/%d buckets used, "
+	       "longest chain length %d\n", hash_name, h->nel,
+	       info.slots_used, h->size, info.max_chain_len);
+}
+
+static void symtab_hash_eval(struct symtab *s)
+{
+	int i;
+
+	for (i = 0; i < SYM_NUM; i++)
+		hash_eval(s[i].table, symtab_name[i]);
+}
+
+#else
+static inline void hash_eval(struct hashtab *h, const char *hash_name)
+{
+}
+#endif
+
+/*
+ * Define the other val_to_name and val_to_struct arrays
+ * in a policy database structure.
+ *
+ * Caller must clean up on failure.
+ */
+static int policydb_index(struct policydb *p)
+{
+	int i, rc;
+
+	printk(KERN_DEBUG "SELinux:  %d users, %d roles, %d types, %d bools",
+	       p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
+	if (p->mls_enabled)
+		printk(", %d sens, %d cats", p->p_levels.nprim,
+		       p->p_cats.nprim);
+	printk("\n");
+
+	printk(KERN_DEBUG "SELinux:  %d classes, %d rules\n",
+	       p->p_classes.nprim, p->te_avtab.nel);
+
+#ifdef DEBUG_HASHES
+	avtab_hash_eval(&p->te_avtab, "rules");
+	symtab_hash_eval(p->symtab);
+#endif
+
+	rc = -ENOMEM;
+	p->class_val_to_struct =
+		kmalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)),
+			GFP_KERNEL);
+	if (!p->class_val_to_struct)
+		goto out;
+
+	rc = -ENOMEM;
+	p->role_val_to_struct =
+		kmalloc(p->p_roles.nprim * sizeof(*(p->role_val_to_struct)),
+			GFP_KERNEL);
+	if (!p->role_val_to_struct)
+		goto out;
+
+	rc = -ENOMEM;
+	p->user_val_to_struct =
+		kmalloc(p->p_users.nprim * sizeof(*(p->user_val_to_struct)),
+			GFP_KERNEL);
+	if (!p->user_val_to_struct)
+		goto out;
+
+	/* Yes, I want the sizeof the pointer, not the structure */
+	rc = -ENOMEM;
+	p->type_val_to_struct_array = flex_array_alloc(sizeof(struct type_datum *),
+						       p->p_types.nprim,
+						       GFP_KERNEL | __GFP_ZERO);
+	if (!p->type_val_to_struct_array)
+		goto out;
+
+	rc = flex_array_prealloc(p->type_val_to_struct_array, 0,
+				 p->p_types.nprim, GFP_KERNEL | __GFP_ZERO);
+	if (rc)
+		goto out;
+
+	rc = cond_init_bool_indexes(p);
+	if (rc)
+		goto out;
+
+	for (i = 0; i < SYM_NUM; i++) {
+		rc = -ENOMEM;
+		p->sym_val_to_name[i] = flex_array_alloc(sizeof(char *),
+							 p->symtab[i].nprim,
+							 GFP_KERNEL | __GFP_ZERO);
+		if (!p->sym_val_to_name[i])
+			goto out;
+
+		rc = flex_array_prealloc(p->sym_val_to_name[i],
+					 0, p->symtab[i].nprim,
+					 GFP_KERNEL | __GFP_ZERO);
+		if (rc)
+			goto out;
+
+		rc = hashtab_map(p->symtab[i].table, index_f[i], p);
+		if (rc)
+			goto out;
+	}
+	rc = 0;
+out:
+	return rc;
+}
+
+/*
+ * The following *_destroy functions are used to
+ * free any memory allocated for each kind of
+ * symbol data in the policy database.
+ */
+
+static int perm_destroy(void *key, void *datum, void *p)
+{
+	kfree(key);
+	kfree(datum);
+	return 0;
+}
+
+static int common_destroy(void *key, void *datum, void *p)
+{
+	struct common_datum *comdatum;
+
+	kfree(key);
+	if (datum) {
+		comdatum = datum;
+		hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
+		hashtab_destroy(comdatum->permissions.table);
+	}
+	kfree(datum);
+	return 0;
+}
+
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+	if (expr) {
+		ebitmap_destroy(&expr->names);
+		if (expr->type_names) {
+			ebitmap_destroy(&expr->type_names->types);
+			ebitmap_destroy(&expr->type_names->negset);
+			kfree(expr->type_names);
+		}
+		kfree(expr);
+	}
+}
+
+static int cls_destroy(void *key, void *datum, void *p)
+{
+	struct class_datum *cladatum;
+	struct constraint_node *constraint, *ctemp;
+	struct constraint_expr *e, *etmp;
+
+	kfree(key);
+	if (datum) {
+		cladatum = datum;
+		hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
+		hashtab_destroy(cladatum->permissions.table);
+		constraint = cladatum->constraints;
+		while (constraint) {
+			e = constraint->expr;
+			while (e) {
+				etmp = e;
+				e = e->next;
+				constraint_expr_destroy(etmp);
+			}
+			ctemp = constraint;
+			constraint = constraint->next;
+			kfree(ctemp);
+		}
+
+		constraint = cladatum->validatetrans;
+		while (constraint) {
+			e = constraint->expr;
+			while (e) {
+				etmp = e;
+				e = e->next;
+				constraint_expr_destroy(etmp);
+			}
+			ctemp = constraint;
+			constraint = constraint->next;
+			kfree(ctemp);
+		}
+		kfree(cladatum->comkey);
+	}
+	kfree(datum);
+	return 0;
+}
+
+static int role_destroy(void *key, void *datum, void *p)
+{
+	struct role_datum *role;
+
+	kfree(key);
+	if (datum) {
+		role = datum;
+		ebitmap_destroy(&role->dominates);
+		ebitmap_destroy(&role->types);
+	}
+	kfree(datum);
+	return 0;
+}
+
+static int type_destroy(void *key, void *datum, void *p)
+{
+	kfree(key);
+	kfree(datum);
+	return 0;
+}
+
+static int user_destroy(void *key, void *datum, void *p)
+{
+	struct user_datum *usrdatum;
+
+	kfree(key);
+	if (datum) {
+		usrdatum = datum;
+		ebitmap_destroy(&usrdatum->roles);
+		ebitmap_destroy(&usrdatum->range.level[0].cat);
+		ebitmap_destroy(&usrdatum->range.level[1].cat);
+		ebitmap_destroy(&usrdatum->dfltlevel.cat);
+	}
+	kfree(datum);
+	return 0;
+}
+
+static int sens_destroy(void *key, void *datum, void *p)
+{
+	struct level_datum *levdatum;
+
+	kfree(key);
+	if (datum) {
+		levdatum = datum;
+		ebitmap_destroy(&levdatum->level->cat);
+		kfree(levdatum->level);
+	}
+	kfree(datum);
+	return 0;
+}
+
+static int cat_destroy(void *key, void *datum, void *p)
+{
+	kfree(key);
+	kfree(datum);
+	return 0;
+}
+
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
+{
+	common_destroy,
+	cls_destroy,
+	role_destroy,
+	type_destroy,
+	user_destroy,
+	cond_destroy_bool,
+	sens_destroy,
+	cat_destroy,
+};
+
+static int filenametr_destroy(void *key, void *datum, void *p)
+{
+	struct filename_trans *ft = key;
+	kfree(ft->name);
+	kfree(key);
+	kfree(datum);
+	cond_resched();
+	return 0;
+}
+
+static int range_tr_destroy(void *key, void *datum, void *p)
+{
+	struct mls_range *rt = datum;
+	kfree(key);
+	ebitmap_destroy(&rt->level[0].cat);
+	ebitmap_destroy(&rt->level[1].cat);
+	kfree(datum);
+	cond_resched();
+	return 0;
+}
+
+static void ocontext_destroy(struct ocontext *c, int i)
+{
+	if (!c)
+		return;
+
+	context_destroy(&c->context[0]);
+	context_destroy(&c->context[1]);
+	if (i == OCON_ISID || i == OCON_FS ||
+	    i == OCON_NETIF || i == OCON_FSUSE)
+		kfree(c->u.name);
+	kfree(c);
+}
+
+/*
+ * Free any memory allocated by a policy database structure.
+ */
+void policydb_destroy(struct policydb *p)
+{
+	struct ocontext *c, *ctmp;
+	struct genfs *g, *gtmp;
+	int i;
+	struct role_allow *ra, *lra = NULL;
+	struct role_trans *tr, *ltr = NULL;
+
+	for (i = 0; i < SYM_NUM; i++) {
+		cond_resched();
+		hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
+		hashtab_destroy(p->symtab[i].table);
+	}
+
+	for (i = 0; i < SYM_NUM; i++) {
+		if (p->sym_val_to_name[i])
+			flex_array_free(p->sym_val_to_name[i]);
+	}
+
+	kfree(p->class_val_to_struct);
+	kfree(p->role_val_to_struct);
+	kfree(p->user_val_to_struct);
+	if (p->type_val_to_struct_array)
+		flex_array_free(p->type_val_to_struct_array);
+
+	avtab_destroy(&p->te_avtab);
+
+	for (i = 0; i < OCON_NUM; i++) {
+		cond_resched();
+		c = p->ocontexts[i];
+		while (c) {
+			ctmp = c;
+			c = c->next;
+			ocontext_destroy(ctmp, i);
+		}
+		p->ocontexts[i] = NULL;
+	}
+
+	g = p->genfs;
+	while (g) {
+		cond_resched();
+		kfree(g->fstype);
+		c = g->head;
+		while (c) {
+			ctmp = c;
+			c = c->next;
+			ocontext_destroy(ctmp, OCON_FSUSE);
+		}
+		gtmp = g;
+		g = g->next;
+		kfree(gtmp);
+	}
+	p->genfs = NULL;
+
+	cond_policydb_destroy(p);
+
+	for (tr = p->role_tr; tr; tr = tr->next) {
+		cond_resched();
+		kfree(ltr);
+		ltr = tr;
+	}
+	kfree(ltr);
+
+	for (ra = p->role_allow; ra; ra = ra->next) {
+		cond_resched();
+		kfree(lra);
+		lra = ra;
+	}
+	kfree(lra);
+
+	hashtab_map(p->filename_trans, filenametr_destroy, NULL);
+	hashtab_destroy(p->filename_trans);
+
+	hashtab_map(p->range_tr, range_tr_destroy, NULL);
+	hashtab_destroy(p->range_tr);
+
+	if (p->type_attr_map_array) {
+		for (i = 0; i < p->p_types.nprim; i++) {
+			struct ebitmap *e;
+
+			e = flex_array_get(p->type_attr_map_array, i);
+			if (!e)
+				continue;
+			ebitmap_destroy(e);
+		}
+		flex_array_free(p->type_attr_map_array);
+	}
+
+	ebitmap_destroy(&p->filename_trans_ttypes);
+	ebitmap_destroy(&p->policycaps);
+	ebitmap_destroy(&p->permissive_map);
+}
+
+/*
+ * Load the initial SIDs specified in a policy database
+ * structure into a SID table.
+ */
+int policydb_load_isids(struct policydb *p, struct sidtab *s)
+{
+	struct ocontext *head, *c;
+	int rc;
+
+	rc = sidtab_init(s);
+	if (rc) {
+		printk(KERN_ERR "SELinux:  out of memory on SID table init\n");
+		goto out;
+	}
+
+	head = p->ocontexts[OCON_ISID];
+	for (c = head; c; c = c->next) {
+		rc = -EINVAL;
+		if (!c->context[0].user) {
+			printk(KERN_ERR "SELinux:  SID %s was never defined.\n",
+				c->u.name);
+			goto out;
+		}
+
+		rc = sidtab_insert(s, c->sid[0], &c->context[0]);
+		if (rc) {
+			printk(KERN_ERR "SELinux:  unable to load initial SID %s.\n",
+				c->u.name);
+			goto out;
+		}
+	}
+	rc = 0;
+out:
+	return rc;
+}
+
+int policydb_class_isvalid(struct policydb *p, unsigned int class)
+{
+	if (!class || class > p->p_classes.nprim)
+		return 0;
+	return 1;
+}
+
+int policydb_role_isvalid(struct policydb *p, unsigned int role)
+{
+	if (!role || role > p->p_roles.nprim)
+		return 0;
+	return 1;
+}
+
+int policydb_type_isvalid(struct policydb *p, unsigned int type)
+{
+	if (!type || type > p->p_types.nprim)
+		return 0;
+	return 1;
+}
+
+/*
+ * Return 1 if the fields in the security context
+ * structure `c' are valid.  Return 0 otherwise.
+ */
+int policydb_context_isvalid(struct policydb *p, struct context *c)
+{
+	struct role_datum *role;
+	struct user_datum *usrdatum;
+
+	if (!c->role || c->role > p->p_roles.nprim)
+		return 0;
+
+	if (!c->user || c->user > p->p_users.nprim)
+		return 0;
+
+	if (!c->type || c->type > p->p_types.nprim)
+		return 0;
+
+	if (c->role != OBJECT_R_VAL) {
+		/*
+		 * Role must be authorized for the type.
+		 */
+		role = p->role_val_to_struct[c->role - 1];
+		if (!ebitmap_get_bit(&role->types, c->type - 1))
+			/* role may not be associated with type */
+			return 0;
+
+		/*
+		 * User must be authorized for the role.
+		 */
+		usrdatum = p->user_val_to_struct[c->user - 1];
+		if (!usrdatum)
+			return 0;
+
+		if (!ebitmap_get_bit(&usrdatum->roles, c->role - 1))
+			/* user may not be associated with role */
+			return 0;
+	}
+
+	if (!mls_context_isvalid(p, c))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Read an MLS range structure from a policydb binary
+ * representation file.
+ */
+static int mls_read_range_helper(struct mls_range *r, void *fp)
+{
+	__le32 buf[2];
+	u32 items;
+	int rc;
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc)
+		goto out;
+
+	rc = -EINVAL;
+	items = le32_to_cpu(buf[0]);
+	if (items > ARRAY_SIZE(buf)) {
+		printk(KERN_ERR "SELinux: mls:  range overflow\n");
+		goto out;
+	}
+
+	rc = next_entry(buf, fp, sizeof(u32) * items);
+	if (rc) {
+		printk(KERN_ERR "SELinux: mls:  truncated range\n");
+		goto out;
+	}
+
+	r->level[0].sens = le32_to_cpu(buf[0]);
+	if (items > 1)
+		r->level[1].sens = le32_to_cpu(buf[1]);
+	else
+		r->level[1].sens = r->level[0].sens;
+
+	rc = ebitmap_read(&r->level[0].cat, fp);
+	if (rc) {
+		printk(KERN_ERR "SELinux: mls:  error reading low categories\n");
+		goto out;
+	}
+	if (items > 1) {
+		rc = ebitmap_read(&r->level[1].cat, fp);
+		if (rc) {
+			printk(KERN_ERR "SELinux: mls:  error reading high categories\n");
+			goto bad_high;
+		}
+	} else {
+		rc = ebitmap_cpy(&r->level[1].cat, &r->level[0].cat);
+		if (rc) {
+			printk(KERN_ERR "SELinux: mls:  out of memory\n");
+			goto bad_high;
+		}
+	}
+
+	return 0;
+bad_high:
+	ebitmap_destroy(&r->level[0].cat);
+out:
+	return rc;
+}
+
+/*
+ * Read and validate a security context structure
+ * from a policydb binary representation file.
+ */
+static int context_read_and_validate(struct context *c,
+				     struct policydb *p,
+				     void *fp)
+{
+	__le32 buf[3];
+	int rc;
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc) {
+		printk(KERN_ERR "SELinux: context truncated\n");
+		goto out;
+	}
+	c->user = le32_to_cpu(buf[0]);
+	c->role = le32_to_cpu(buf[1]);
+	c->type = le32_to_cpu(buf[2]);
+	if (p->policyvers >= POLICYDB_VERSION_MLS) {
+		rc = mls_read_range_helper(&c->range, fp);
+		if (rc) {
+			printk(KERN_ERR "SELinux: error reading MLS range of context\n");
+			goto out;
+		}
+	}
+
+	rc = -EINVAL;
+	if (!policydb_context_isvalid(p, c)) {
+		printk(KERN_ERR "SELinux:  invalid security context\n");
+		context_destroy(c);
+		goto out;
+	}
+	rc = 0;
+out:
+	return rc;
+}
+
+/*
+ * The following *_read functions are used to
+ * read the symbol data from a policy database
+ * binary representation file.
+ */
+
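+/*
+ * Allocate len + 1 bytes, read len bytes from the policy file and
+ * NUL-terminate the result.  *strp is assigned before the read so
+ * that the caller's error path can free a partially read string.
+ */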
+static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
+{
+	int rc;
+	char *str;
+
+	str = kmalloc(len + 1, flags);
+	if (!str)
+		return -ENOMEM;
+
+	/* the caller is expected to free str, even on error */
+	*strp = str;
+
+	rc = next_entry(str, fp, len);
+	if (rc)
+		return rc;
+
+	str[len] = '\0';
+	return 0;
+}
+
+static int perm_read(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct perm_datum *perdatum;
+	int rc;
+	__le32 buf[2];
+	u32 len;
+
+	rc = -ENOMEM;
+	perdatum = kzalloc(sizeof(*perdatum), GFP_KERNEL);
+	if (!perdatum)
+		goto bad;
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc)
+		goto bad;
+
+	len = le32_to_cpu(buf[0]);
+	perdatum->value = le32_to_cpu(buf[1]);
+
+	rc = str_read(&key, GFP_KERNEL, fp, len);
+	if (rc)
+		goto bad;
+
+	rc = hashtab_insert(h, key, perdatum);
+	if (rc)
+		goto bad;
+
+	return 0;
+bad:
+	perm_destroy(key, perdatum, NULL);
+	return rc;
+}
+
+static int common_read(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct common_datum *comdatum;
+	__le32 buf[4];
+	u32 len, nel;
+	int i, rc;
+
+	rc = -ENOMEM;
+	comdatum = kzalloc(sizeof(*comdatum), GFP_KERNEL);
+	if (!comdatum)
+		goto bad;
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc)
+		goto bad;
+
+	len = le32_to_cpu(buf[0]);
+	comdatum->value = le32_to_cpu(buf[1]);
+
+	rc = symtab_init(&comdatum->permissions, PERM_SYMTAB_SIZE);
+	if (rc)
+		goto bad;
+	comdatum->permissions.nprim = le32_to_cpu(buf[2]);
+	nel = le32_to_cpu(buf[3]);
+
+	rc = str_read(&key, GFP_KERNEL, fp, len);
+	if (rc)
+		goto bad;
+
+	for (i = 0; i < nel; i++) {
+		rc = perm_read(p, comdatum->permissions.table, fp);
+		if (rc)
+			goto bad;
+	}
+
+	rc = hashtab_insert(h, key, comdatum);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	common_destroy(key, comdatum, NULL);
+	return rc;
+}
+
+static void type_set_init(struct type_set *t)
+{
+	ebitmap_init(&t->types);
+	ebitmap_init(&t->negset);
+}
+
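+/*
+ * Read a type_set (types, negated types and flags) from the policy
+ * file.  These are carried with constraint expressions from
+ * POLICYDB_VERSION_CONSTRAINT_NAMES onwards; see read_cons_helper().
+ */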
+static int type_set_read(struct type_set *t, void *fp)
+{
+	__le32 buf[1];
+	int rc;
+
+	if (ebitmap_read(&t->types, fp))
+		return -EINVAL;
+	if (ebitmap_read(&t->negset, fp))
+		return -EINVAL;
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc < 0)
+		return -EINVAL;
+	t->flags = le32_to_cpu(buf[0]);
+
+	return 0;
+}
+
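+/*
+ * Read a list of constraint nodes.  Each constraint expression is
+ * stored in postfix order, so "depth" tracks the evaluation stack:
+ * CEXPR_NOT rewrites the value on top, CEXPR_AND and CEXPR_OR pop two
+ * values and push one, and CEXPR_ATTR/CEXPR_NAMES push one.  Since
+ * depth starts at -1, a well-formed expression ends with exactly one
+ * value on the stack (depth == 0).
+ */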
+static int read_cons_helper(struct policydb *p,
+				struct constraint_node **nodep,
+				int ncons, int allowxtarget, void *fp)
+{
+	struct constraint_node *c, *lc;
+	struct constraint_expr *e, *le;
+	__le32 buf[3];
+	u32 nexpr;
+	int rc, i, j, depth;
+
+	lc = NULL;
+	for (i = 0; i < ncons; i++) {
+		c = kzalloc(sizeof(*c), GFP_KERNEL);
+		if (!c)
+			return -ENOMEM;
+
+		if (lc)
+			lc->next = c;
+		else
+			*nodep = c;
+
+		rc = next_entry(buf, fp, (sizeof(u32) * 2));
+		if (rc)
+			return rc;
+		c->permissions = le32_to_cpu(buf[0]);
+		nexpr = le32_to_cpu(buf[1]);
+		le = NULL;
+		depth = -1;
+		for (j = 0; j < nexpr; j++) {
+			e = kzalloc(sizeof(*e), GFP_KERNEL);
+			if (!e)
+				return -ENOMEM;
+
+			if (le)
+				le->next = e;
+			else
+				c->expr = e;
+
+			rc = next_entry(buf, fp, (sizeof(u32) * 3));
+			if (rc)
+				return rc;
+			e->expr_type = le32_to_cpu(buf[0]);
+			e->attr = le32_to_cpu(buf[1]);
+			e->op = le32_to_cpu(buf[2]);
+
+			switch (e->expr_type) {
+			case CEXPR_NOT:
+				if (depth < 0)
+					return -EINVAL;
+				break;
+			case CEXPR_AND:
+			case CEXPR_OR:
+				if (depth < 1)
+					return -EINVAL;
+				depth--;
+				break;
+			case CEXPR_ATTR:
+				if (depth == (CEXPR_MAXDEPTH - 1))
+					return -EINVAL;
+				depth++;
+				break;
+			case CEXPR_NAMES:
+				if (!allowxtarget && (e->attr & CEXPR_XTARGET))
+					return -EINVAL;
+				if (depth == (CEXPR_MAXDEPTH - 1))
+					return -EINVAL;
+				depth++;
+				rc = ebitmap_read(&e->names, fp);
+				if (rc)
+					return rc;
+				if (p->policyvers >=
+				    POLICYDB_VERSION_CONSTRAINT_NAMES) {
+					e->type_names = kzalloc(sizeof(*e->type_names),
+								GFP_KERNEL);
+					if (!e->type_names)
+						return -ENOMEM;
+					type_set_init(e->type_names);
+					rc = type_set_read(e->type_names, fp);
+					if (rc)
+						return rc;
+				}
+				break;
+			default:
+				return -EINVAL;
+			}
+			le = e;
+		}
+		if (depth != 0)
+			return -EINVAL;
+		lc = c;
+	}
+
+	return 0;
+}
+
+static int class_read(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct class_datum *cladatum;
+	__le32 buf[6];
+	u32 len, len2, ncons, nel;
+	int i, rc;
+
+	rc = -ENOMEM;
+	cladatum = kzalloc(sizeof(*cladatum), GFP_KERNEL);
+	if (!cladatum)
+		goto bad;
+
+	rc = next_entry(buf, fp, sizeof(u32)*6);
+	if (rc)
+		goto bad;
+
+	len = le32_to_cpu(buf[0]);
+	len2 = le32_to_cpu(buf[1]);
+	cladatum->value = le32_to_cpu(buf[2]);
+
+	rc = symtab_init(&cladatum->permissions, PERM_SYMTAB_SIZE);
+	if (rc)
+		goto bad;
+	cladatum->permissions.nprim = le32_to_cpu(buf[3]);
+	nel = le32_to_cpu(buf[4]);
+
+	ncons = le32_to_cpu(buf[5]);
+
+	rc = str_read(&key, GFP_KERNEL, fp, len);
+	if (rc)
+		goto bad;
+
+	if (len2) {
+		rc = str_read(&cladatum->comkey, GFP_KERNEL, fp, len2);
+		if (rc)
+			goto bad;
+
+		rc = -EINVAL;
+		cladatum->comdatum = hashtab_search(p->p_commons.table, cladatum->comkey);
+		if (!cladatum->comdatum) {
+			printk(KERN_ERR "SELinux:  unknown common %s\n", cladatum->comkey);
+			goto bad;
+		}
+	}
+	for (i = 0; i < nel; i++) {
+		rc = perm_read(p, cladatum->permissions.table, fp);
+		if (rc)
+			goto bad;
+	}
+
+	rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp);
+	if (rc)
+		goto bad;
+
+	if (p->policyvers >= POLICYDB_VERSION_VALIDATETRANS) {
+		/* grab the validatetrans rules */
+		rc = next_entry(buf, fp, sizeof(u32));
+		if (rc)
+			goto bad;
+		ncons = le32_to_cpu(buf[0]);
+		rc = read_cons_helper(p, &cladatum->validatetrans,
+				ncons, 1, fp);
+		if (rc)
+			goto bad;
+	}
+
+	if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+		rc = next_entry(buf, fp, sizeof(u32) * 3);
+		if (rc)
+			goto bad;
+
+		cladatum->default_user = le32_to_cpu(buf[0]);
+		cladatum->default_role = le32_to_cpu(buf[1]);
+		cladatum->default_range = le32_to_cpu(buf[2]);
+	}
+
+	if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+		rc = next_entry(buf, fp, sizeof(u32) * 1);
+		if (rc)
+			goto bad;
+		cladatum->default_type = le32_to_cpu(buf[0]);
+	}
+
+	rc = hashtab_insert(h, key, cladatum);
+	if (rc)
+		goto bad;
+
+	return 0;
+bad:
+	cls_destroy(key, cladatum, NULL);
+	return rc;
+}
+
+static int role_read(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct role_datum *role;
+	int rc, to_read = 2;
+	__le32 buf[3];
+	u32 len;
+
+	rc = -ENOMEM;
+	role = kzalloc(sizeof(*role), GFP_KERNEL);
+	if (!role)
+		goto bad;
+
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		to_read = 3;
+
+	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
+	if (rc)
+		goto bad;
+
+	len = le32_to_cpu(buf[0]);
+	role->value = le32_to_cpu(buf[1]);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		role->bounds = le32_to_cpu(buf[2]);
+
+	rc = str_read(&key, GFP_KERNEL, fp, len);
+	if (rc)
+		goto bad;
+
+	rc = ebitmap_read(&role->dominates, fp);
+	if (rc)
+		goto bad;
+
+	rc = ebitmap_read(&role->types, fp);
+	if (rc)
+		goto bad;
+
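+	/*
+	 * object_r is validated but never inserted into the symbol
+	 * table; the "bad" label frees key and role in either case.
+	 */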
+	if (strcmp(key, OBJECT_R) == 0) {
+		rc = -EINVAL;
+		if (role->value != OBJECT_R_VAL) {
+			printk(KERN_ERR "SELinux: Role %s has wrong value %d\n",
+			       OBJECT_R, role->value);
+			goto bad;
+		}
+		rc = 0;
+		goto bad;
+	}
+
+	rc = hashtab_insert(h, key, role);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	role_destroy(key, role, NULL);
+	return rc;
+}
+
+static int type_read(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct type_datum *typdatum;
+	int rc, to_read = 3;
+	__le32 buf[4];
+	u32 len;
+
+	rc = -ENOMEM;
+	typdatum = kzalloc(sizeof(*typdatum), GFP_KERNEL);
+	if (!typdatum)
+		goto bad;
+
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		to_read = 4;
+
+	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
+	if (rc)
+		goto bad;
+
+	len = le32_to_cpu(buf[0]);
+	typdatum->value = le32_to_cpu(buf[1]);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
+		u32 prop = le32_to_cpu(buf[2]);
+
+		if (prop & TYPEDATUM_PROPERTY_PRIMARY)
+			typdatum->primary = 1;
+		if (prop & TYPEDATUM_PROPERTY_ATTRIBUTE)
+			typdatum->attribute = 1;
+
+		typdatum->bounds = le32_to_cpu(buf[3]);
+	} else {
+		typdatum->primary = le32_to_cpu(buf[2]);
+	}
+
+	rc = str_read(&key, GFP_KERNEL, fp, len);
+	if (rc)
+		goto bad;
+
+	rc = hashtab_insert(h, key, typdatum);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	type_destroy(key, typdatum, NULL);
+	return rc;
+}
+
+/*
+ * Read a MLS level structure from a policydb binary
+ * representation file.
+ */
+static int mls_read_level(struct mls_level *lp, void *fp)
+{
+	__le32 buf[1];
+	int rc;
+
+	memset(lp, 0, sizeof(*lp));
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc) {
+		printk(KERN_ERR "SELinux: mls: truncated level\n");
+		return rc;
+	}
+	lp->sens = le32_to_cpu(buf[0]);
+
+	rc = ebitmap_read(&lp->cat, fp);
+	if (rc) {
+		printk(KERN_ERR "SELinux: mls:  error reading level categories\n");
+		return rc;
+	}
+	return 0;
+}
+
+static int user_read(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct user_datum *usrdatum;
+	int rc, to_read = 2;
+	__le32 buf[3];
+	u32 len;
+
+	rc = -ENOMEM;
+	usrdatum = kzalloc(sizeof(*usrdatum), GFP_KERNEL);
+	if (!usrdatum)
+		goto bad;
+
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		to_read = 3;
+
+	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
+	if (rc)
+		goto bad;
+
+	len = le32_to_cpu(buf[0]);
+	usrdatum->value = le32_to_cpu(buf[1]);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		usrdatum->bounds = le32_to_cpu(buf[2]);
+
+	rc = str_read(&key, GFP_KERNEL, fp, len);
+	if (rc)
+		goto bad;
+
+	rc = ebitmap_read(&usrdatum->roles, fp);
+	if (rc)
+		goto bad;
+
+	if (p->policyvers >= POLICYDB_VERSION_MLS) {
+		rc = mls_read_range_helper(&usrdatum->range, fp);
+		if (rc)
+			goto bad;
+		rc = mls_read_level(&usrdatum->dfltlevel, fp);
+		if (rc)
+			goto bad;
+	}
+
+	rc = hashtab_insert(h, key, usrdatum);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	user_destroy(key, usrdatum, NULL);
+	return rc;
+}
+
+static int sens_read(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct level_datum *levdatum;
+	int rc;
+	__le32 buf[2];
+	u32 len;
+
+	rc = -ENOMEM;
+	levdatum = kzalloc(sizeof(*levdatum), GFP_ATOMIC);
+	if (!levdatum)
+		goto bad;
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc)
+		goto bad;
+
+	len = le32_to_cpu(buf[0]);
+	levdatum->isalias = le32_to_cpu(buf[1]);
+
+	rc = str_read(&key, GFP_ATOMIC, fp, len);
+	if (rc)
+		goto bad;
+
+	rc = -ENOMEM;
+	levdatum->level = kmalloc(sizeof(struct mls_level), GFP_ATOMIC);
+	if (!levdatum->level)
+		goto bad;
+
+	rc = mls_read_level(levdatum->level, fp);
+	if (rc)
+		goto bad;
+
+	rc = hashtab_insert(h, key, levdatum);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	sens_destroy(key, levdatum, NULL);
+	return rc;
+}
+
+static int cat_read(struct policydb *p, struct hashtab *h, void *fp)
+{
+	char *key = NULL;
+	struct cat_datum *catdatum;
+	int rc;
+	__le32 buf[3];
+	u32 len;
+
+	rc = -ENOMEM;
+	catdatum = kzalloc(sizeof(*catdatum), GFP_ATOMIC);
+	if (!catdatum)
+		goto bad;
+
+	rc = next_entry(buf, fp, sizeof buf);
+	if (rc)
+		goto bad;
+
+	len = le32_to_cpu(buf[0]);
+	catdatum->value = le32_to_cpu(buf[1]);
+	catdatum->isalias = le32_to_cpu(buf[2]);
+
+	rc = str_read(&key, GFP_ATOMIC, fp, len);
+	if (rc)
+		goto bad;
+
+	rc = hashtab_insert(h, key, catdatum);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	cat_destroy(key, catdatum, NULL);
+	return rc;
+}
+
+static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) =
+{
+	common_read,
+	class_read,
+	role_read,
+	type_read,
+	user_read,
+	cond_read_bool,
+	sens_read,
+	cat_read,
+};
+
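+/*
+ * Walk the bounds chain of a user (at most POLICYDB_BOUNDS_MAXDEPTH
+ * links) and verify that every role authorized for the user is also
+ * authorized for each of its bounding users.
+ */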
+static int user_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+	struct user_datum *upper, *user;
+	struct policydb *p = datap;
+	int depth = 0;
+
+	upper = user = datum;
+	while (upper->bounds) {
+		struct ebitmap_node *node;
+		unsigned long bit;
+
+		if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+			printk(KERN_ERR "SELinux: user %s: "
+			       "too deep or looped boundary",
+			       (char *) key);
+			return -EINVAL;
+		}
+
+		upper = p->user_val_to_struct[upper->bounds - 1];
+		ebitmap_for_each_positive_bit(&user->roles, node, bit) {
+			if (ebitmap_get_bit(&upper->roles, bit))
+				continue;
+
+			printk(KERN_ERR
+			       "SELinux: boundary violated policy: "
+			       "user=%s role=%s bounds=%s\n",
+			       sym_name(p, SYM_USERS, user->value - 1),
+			       sym_name(p, SYM_ROLES, bit),
+			       sym_name(p, SYM_USERS, upper->value - 1));
+
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int role_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+	struct role_datum *upper, *role;
+	struct policydb *p = datap;
+	int depth = 0;
+
+	upper = role = datum;
+	while (upper->bounds) {
+		struct ebitmap_node *node;
+		unsigned long bit;
+
+		if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+			printk(KERN_ERR "SELinux: role %s: "
+			       "too deep or looped bounds\n",
+			       (char *) key);
+			return -EINVAL;
+		}
+
+		upper = p->role_val_to_struct[upper->bounds - 1];
+		ebitmap_for_each_positive_bit(&role->types, node, bit) {
+			if (ebitmap_get_bit(&upper->types, bit))
+				continue;
+
+			printk(KERN_ERR
+			       "SELinux: boundary violated policy: "
+			       "role=%s type=%s bounds=%s\n",
+			       sym_name(p, SYM_ROLES, role->value - 1),
+			       sym_name(p, SYM_TYPES, bit),
+			       sym_name(p, SYM_ROLES, upper->value - 1));
+
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int type_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+	struct type_datum *upper;
+	struct policydb *p = datap;
+	int depth = 0;
+
+	upper = datum;
+	while (upper->bounds) {
+		if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+			printk(KERN_ERR "SELinux: type %s: "
+			       "too deep or looped boundary\n",
+			       (char *) key);
+			return -EINVAL;
+		}
+
+		upper = flex_array_get_ptr(p->type_val_to_struct_array,
+					   upper->bounds - 1);
+		BUG_ON(!upper);
+
+		if (upper->attribute) {
+			printk(KERN_ERR "SELinux: type %s: "
+			       "bounded by attribute %s",
+			       (char *) key,
+			       sym_name(p, SYM_TYPES, upper->value - 1));
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int policydb_bounds_sanity_check(struct policydb *p)
+{
+	int rc;
+
+	if (p->policyvers < POLICYDB_VERSION_BOUNDARY)
+		return 0;
+
+	rc = hashtab_map(p->p_users.table,
+			 user_bounds_sanity_check, p);
+	if (rc)
+		return rc;
+
+	rc = hashtab_map(p->p_roles.table,
+			 role_bounds_sanity_check, p);
+	if (rc)
+		return rc;
+
+	rc = hashtab_map(p->p_types.table,
+			 type_bounds_sanity_check, p);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+u16 string_to_security_class(struct policydb *p, const char *name)
+{
+	struct class_datum *cladatum;
+
+	cladatum = hashtab_search(p->p_classes.table, name);
+	if (!cladatum)
+		return 0;
+
+	return cladatum->value;
+}
+
+u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name)
+{
+	struct class_datum *cladatum;
+	struct perm_datum *perdatum = NULL;
+	struct common_datum *comdatum;
+
+	if (!tclass || tclass > p->p_classes.nprim)
+		return 0;
+
+	cladatum = p->class_val_to_struct[tclass-1];
+	comdatum = cladatum->comdatum;
+	if (comdatum)
+		perdatum = hashtab_search(comdatum->permissions.table,
+					  name);
+	if (!perdatum)
+		perdatum = hashtab_search(cladatum->permissions.table,
+					  name);
+	if (!perdatum)
+		return 0;
+
+	return 1U << (perdatum->value-1);
+}
+
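+/*
+ * Read the MLS range transitions.  Policies older than
+ * POLICYDB_VERSION_RANGETRANS do not store a target class, so it
+ * defaults to the process class.
+ */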
+static int range_read(struct policydb *p, void *fp)
+{
+	struct range_trans *rt = NULL;
+	struct mls_range *r = NULL;
+	int i, rc;
+	__le32 buf[2];
+	u32 nel;
+
+	if (p->policyvers < POLICYDB_VERSION_MLS)
+		return 0;
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc)
+		goto out;
+
+	nel = le32_to_cpu(buf[0]);
+	for (i = 0; i < nel; i++) {
+		rc = -ENOMEM;
+		rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+		if (!rt)
+			goto out;
+
+		rc = next_entry(buf, fp, (sizeof(u32) * 2));
+		if (rc)
+			goto out;
+
+		rt->source_type = le32_to_cpu(buf[0]);
+		rt->target_type = le32_to_cpu(buf[1]);
+		if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
+			rc = next_entry(buf, fp, sizeof(u32));
+			if (rc)
+				goto out;
+			rt->target_class = le32_to_cpu(buf[0]);
+		} else {
+			rt->target_class = p->process_class;
+		}
+
+		rc = -EINVAL;
+		if (!policydb_type_isvalid(p, rt->source_type) ||
+		    !policydb_type_isvalid(p, rt->target_type) ||
+		    !policydb_class_isvalid(p, rt->target_class))
+			goto out;
+
+		rc = -ENOMEM;
+		r = kzalloc(sizeof(*r), GFP_KERNEL);
+		if (!r)
+			goto out;
+
+		rc = mls_read_range_helper(r, fp);
+		if (rc)
+			goto out;
+
+		rc = -EINVAL;
+		if (!mls_range_isvalid(p, r)) {
+			printk(KERN_WARNING "SELinux:  rangetrans:  invalid range\n");
+			goto out;
+		}
+
+		rc = hashtab_insert(p->range_tr, rt, r);
+		if (rc)
+			goto out;
+
+		rt = NULL;
+		r = NULL;
+	}
+	hash_eval(p->range_tr, "rangetr");
+	rc = 0;
+out:
+	kfree(rt);
+	kfree(r);
+	return rc;
+}
+
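+/*
+ * Read the filename transition rules: a path component string followed
+ * by (stype, ttype, tclass, otype) for each rule.  Every parent type
+ * seen here is also set in filename_trans_ttypes so that lookups can
+ * be skipped cheaply when a directory type has no rules at all.
+ */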
+static int filename_trans_read(struct policydb *p, void *fp)
+{
+	struct filename_trans *ft;
+	struct filename_trans_datum *otype;
+	char *name;
+	u32 nel, len;
+	__le32 buf[4];
+	int rc, i;
+
+	if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+		return 0;
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc)
+		return rc;
+	nel = le32_to_cpu(buf[0]);
+
+	for (i = 0; i < nel; i++) {
+		ft = NULL;
+		otype = NULL;
+		name = NULL;
+
+		rc = -ENOMEM;
+		ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+		if (!ft)
+			goto out;
+
+		rc = -ENOMEM;
+		otype = kmalloc(sizeof(*otype), GFP_KERNEL);
+		if (!otype)
+			goto out;
+
+		/* length of the path component string */
+		rc = next_entry(buf, fp, sizeof(u32));
+		if (rc)
+			goto out;
+		len = le32_to_cpu(buf[0]);
+
+		/* path component string */
+		rc = str_read(&name, GFP_KERNEL, fp, len);
+		if (rc)
+			goto out;
+
+		ft->name = name;
+
+		rc = next_entry(buf, fp, sizeof(u32) * 4);
+		if (rc)
+			goto out;
+
+		ft->stype = le32_to_cpu(buf[0]);
+		ft->ttype = le32_to_cpu(buf[1]);
+		ft->tclass = le32_to_cpu(buf[2]);
+
+		otype->otype = le32_to_cpu(buf[3]);
+
+		rc = ebitmap_set_bit(&p->filename_trans_ttypes, ft->ttype, 1);
+		if (rc)
+			goto out;
+
+		rc = hashtab_insert(p->filename_trans, ft, otype);
+		if (rc) {
+			/*
+			 * Do not return -EEXIST to the caller, or the system
+			 * will not boot.
+			 */
+			if (rc != -EEXIST)
+				goto out;
+			/* But free memory to avoid memory leak. */
+			kfree(ft);
+			kfree(name);
+			kfree(otype);
+		}
+	}
+	hash_eval(p->filename_trans, "filenametr");
+	return 0;
+out:
+	kfree(ft);
+	kfree(name);
+	kfree(otype);
+
+	return rc;
+}
+
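+/*
+ * Read the genfs contexts.  Filesystem types are kept sorted by name,
+ * and each filesystem's path entries are kept sorted by decreasing
+ * length so that the most specific prefix is matched first.
+ */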
+static int genfs_read(struct policydb *p, void *fp)
+{
+	int i, j, rc;
+	u32 nel, nel2, len, len2;
+	__le32 buf[1];
+	struct ocontext *l, *c;
+	struct ocontext *newc = NULL;
+	struct genfs *genfs_p, *genfs;
+	struct genfs *newgenfs = NULL;
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc)
+		goto out;
+	nel = le32_to_cpu(buf[0]);
+
+	for (i = 0; i < nel; i++) {
+		rc = next_entry(buf, fp, sizeof(u32));
+		if (rc)
+			goto out;
+		len = le32_to_cpu(buf[0]);
+
+		rc = -ENOMEM;
+		newgenfs = kzalloc(sizeof(*newgenfs), GFP_KERNEL);
+		if (!newgenfs)
+			goto out;
+
+		rc = str_read(&newgenfs->fstype, GFP_KERNEL, fp, len);
+		if (rc)
+			goto out;
+
+		for (genfs_p = NULL, genfs = p->genfs; genfs;
+		     genfs_p = genfs, genfs = genfs->next) {
+			rc = -EINVAL;
+			if (strcmp(newgenfs->fstype, genfs->fstype) == 0) {
+				printk(KERN_ERR "SELinux:  dup genfs fstype %s\n",
+				       newgenfs->fstype);
+				goto out;
+			}
+			if (strcmp(newgenfs->fstype, genfs->fstype) < 0)
+				break;
+		}
+		newgenfs->next = genfs;
+		if (genfs_p)
+			genfs_p->next = newgenfs;
+		else
+			p->genfs = newgenfs;
+		genfs = newgenfs;
+		newgenfs = NULL;
+
+		rc = next_entry(buf, fp, sizeof(u32));
+		if (rc)
+			goto out;
+
+		nel2 = le32_to_cpu(buf[0]);
+		for (j = 0; j < nel2; j++) {
+			rc = next_entry(buf, fp, sizeof(u32));
+			if (rc)
+				goto out;
+			len = le32_to_cpu(buf[0]);
+
+			rc = -ENOMEM;
+			newc = kzalloc(sizeof(*newc), GFP_KERNEL);
+			if (!newc)
+				goto out;
+
+			rc = str_read(&newc->u.name, GFP_KERNEL, fp, len);
+			if (rc)
+				goto out;
+
+			rc = next_entry(buf, fp, sizeof(u32));
+			if (rc)
+				goto out;
+
+			newc->v.sclass = le32_to_cpu(buf[0]);
+			rc = context_read_and_validate(&newc->context[0], p, fp);
+			if (rc)
+				goto out;
+
+			for (l = NULL, c = genfs->head; c;
+			     l = c, c = c->next) {
+				rc = -EINVAL;
+				if (!strcmp(newc->u.name, c->u.name) &&
+				    (!c->v.sclass || !newc->v.sclass ||
+				     newc->v.sclass == c->v.sclass)) {
+					printk(KERN_ERR "SELinux:  dup genfs entry (%s,%s)\n",
+					       genfs->fstype, c->u.name);
+					goto out;
+				}
+				len = strlen(newc->u.name);
+				len2 = strlen(c->u.name);
+				if (len > len2)
+					break;
+			}
+
+			newc->next = c;
+			if (l)
+				l->next = newc;
+			else
+				genfs->head = newc;
+			newc = NULL;
+		}
+	}
+	rc = 0;
+out:
+	if (newgenfs)
+		kfree(newgenfs->fstype);
+	kfree(newgenfs);
+	ocontext_destroy(newc, OCON_FSUSE);
+
+	return rc;
+}
+
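+/*
+ * Read the object contexts (initial SIDs, filesystems, ports, network
+ * interfaces, nodes and fs_use entries); the layout of each entry
+ * depends on its OCON_* kind.
+ */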
+static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+			 void *fp)
+{
+	int i, j, rc;
+	u32 nel, len;
+	__le32 buf[3];
+	struct ocontext *l, *c;
+	u32 nodebuf[8];
+
+	for (i = 0; i < info->ocon_num; i++) {
+		rc = next_entry(buf, fp, sizeof(u32));
+		if (rc)
+			goto out;
+		nel = le32_to_cpu(buf[0]);
+
+		l = NULL;
+		for (j = 0; j < nel; j++) {
+			rc = -ENOMEM;
+			c = kzalloc(sizeof(*c), GFP_KERNEL);
+			if (!c)
+				goto out;
+			if (l)
+				l->next = c;
+			else
+				p->ocontexts[i] = c;
+			l = c;
+
+			switch (i) {
+			case OCON_ISID:
+				rc = next_entry(buf, fp, sizeof(u32));
+				if (rc)
+					goto out;
+
+				c->sid[0] = le32_to_cpu(buf[0]);
+				rc = context_read_and_validate(&c->context[0], p, fp);
+				if (rc)
+					goto out;
+				break;
+			case OCON_FS:
+			case OCON_NETIF:
+				rc = next_entry(buf, fp, sizeof(u32));
+				if (rc)
+					goto out;
+				len = le32_to_cpu(buf[0]);
+
+				rc = str_read(&c->u.name, GFP_KERNEL, fp, len);
+				if (rc)
+					goto out;
+
+				rc = context_read_and_validate(&c->context[0], p, fp);
+				if (rc)
+					goto out;
+				rc = context_read_and_validate(&c->context[1], p, fp);
+				if (rc)
+					goto out;
+				break;
+			case OCON_PORT:
+				rc = next_entry(buf, fp, sizeof(u32)*3);
+				if (rc)
+					goto out;
+				c->u.port.protocol = le32_to_cpu(buf[0]);
+				c->u.port.low_port = le32_to_cpu(buf[1]);
+				c->u.port.high_port = le32_to_cpu(buf[2]);
+				rc = context_read_and_validate(&c->context[0], p, fp);
+				if (rc)
+					goto out;
+				break;
+			case OCON_NODE:
+				rc = next_entry(nodebuf, fp, sizeof(u32) * 2);
+				if (rc)
+					goto out;
+				c->u.node.addr = nodebuf[0]; /* network order */
+				c->u.node.mask = nodebuf[1]; /* network order */
+				rc = context_read_and_validate(&c->context[0], p, fp);
+				if (rc)
+					goto out;
+				break;
+			case OCON_FSUSE:
+				rc = next_entry(buf, fp, sizeof(u32)*2);
+				if (rc)
+					goto out;
+
+				rc = -EINVAL;
+				c->v.behavior = le32_to_cpu(buf[0]);
+				/* Determined at runtime, not in policy DB. */
+				if (c->v.behavior == SECURITY_FS_USE_MNTPOINT)
+					goto out;
+				if (c->v.behavior > SECURITY_FS_USE_MAX)
+					goto out;
+
+				len = le32_to_cpu(buf[1]);
+				rc = str_read(&c->u.name, GFP_KERNEL, fp, len);
+				if (rc)
+					goto out;
+
+				rc = context_read_and_validate(&c->context[0], p, fp);
+				if (rc)
+					goto out;
+				break;
+			case OCON_NODE6: {
+				int k;
+
+				rc = next_entry(nodebuf, fp, sizeof(u32) * 8);
+				if (rc)
+					goto out;
+				for (k = 0; k < 4; k++)
+					c->u.node6.addr[k] = nodebuf[k];
+				for (k = 0; k < 4; k++)
+					c->u.node6.mask[k] = nodebuf[k+4];
+				rc = context_read_and_validate(&c->context[0], p, fp);
+				if (rc)
+					goto out;
+				break;
+			}
+			}
+		}
+	}
+	rc = 0;
+out:
+	return rc;
+}
+
+/*
+ * Read the configuration data from a policy database binary
+ * representation file into a policy database structure.  The layout is
+ * a magic/version header, the symbol tables, the TE avtab and
+ * conditional rules, role transitions and allows, filename
+ * transitions, object contexts, genfs entries, range transitions and
+ * the type-attribute map.
+ */
+int policydb_read(struct policydb *p, void *fp)
+{
+	struct role_allow *ra, *lra;
+	struct role_trans *tr, *ltr;
+	int i, j, rc;
+	__le32 buf[4];
+	u32 len, nprim, nel;
+	char *policydb_str;
+	struct policydb_compat_info *info;
+
+	rc = policydb_init(p);
+	if (rc)
+		return rc;
+
+	/* Read the magic number and string length. */
+	rc = next_entry(buf, fp, sizeof(u32) * 2);
+	if (rc)
+		goto bad;
+
+	rc = -EINVAL;
+	if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) {
+		printk(KERN_ERR "SELinux:  policydb magic number 0x%x does "
+		       "not match expected magic number 0x%x\n",
+		       le32_to_cpu(buf[0]), POLICYDB_MAGIC);
+		goto bad;
+	}
+
+	rc = -EINVAL;
+	len = le32_to_cpu(buf[1]);
+	if (len != strlen(POLICYDB_STRING)) {
+		printk(KERN_ERR "SELinux:  policydb string length %d does not "
+		       "match expected length %Zu\n",
+		       len, strlen(POLICYDB_STRING));
+		goto bad;
+	}
+
+	rc = -ENOMEM;
+	policydb_str = kmalloc(len + 1, GFP_KERNEL);
+	if (!policydb_str) {
+		printk(KERN_ERR "SELinux:  unable to allocate memory for policydb "
+		       "string of length %d\n", len);
+		goto bad;
+	}
+
+	rc = next_entry(policydb_str, fp, len);
+	if (rc) {
+		printk(KERN_ERR "SELinux:  truncated policydb string identifier\n");
+		kfree(policydb_str);
+		goto bad;
+	}
+
+	rc = -EINVAL;
+	policydb_str[len] = '\0';
+	if (strcmp(policydb_str, POLICYDB_STRING)) {
+		printk(KERN_ERR "SELinux:  policydb string %s does not match "
+		       "my string %s\n", policydb_str, POLICYDB_STRING);
+		kfree(policydb_str);
+		goto bad;
+	}
+	/* Done with policydb_str. */
+	kfree(policydb_str);
+	policydb_str = NULL;
+
+	/* Read the version and table sizes. */
+	rc = next_entry(buf, fp, sizeof(u32)*4);
+	if (rc)
+		goto bad;
+
+	rc = -EINVAL;
+	p->policyvers = le32_to_cpu(buf[0]);
+	if (p->policyvers < POLICYDB_VERSION_MIN ||
+	    p->policyvers > POLICYDB_VERSION_MAX) {
+		printk(KERN_ERR "SELinux:  policydb version %d does not match "
+		       "my version range %d-%d\n",
+		       le32_to_cpu(buf[0]), POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX);
+		goto bad;
+	}
+
+	if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) {
+		p->mls_enabled = 1;
+
+		rc = -EINVAL;
+		if (p->policyvers < POLICYDB_VERSION_MLS) {
+			printk(KERN_ERR "SELinux: security policydb version %d "
+				"(MLS) not backwards compatible\n",
+				p->policyvers);
+			goto bad;
+		}
+	}
+	p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
+	p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
+
+	if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+		rc = ebitmap_read(&p->policycaps, fp);
+		if (rc)
+			goto bad;
+	}
+
+	if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+		rc = ebitmap_read(&p->permissive_map, fp);
+		if (rc)
+			goto bad;
+	}
+
+	rc = -EINVAL;
+	info = policydb_lookup_compat(p->policyvers);
+	if (!info) {
+		printk(KERN_ERR "SELinux:  unable to find policy compat info "
+		       "for version %d\n", p->policyvers);
+		goto bad;
+	}
+
+	rc = -EINVAL;
+	if (le32_to_cpu(buf[2]) != info->sym_num ||
+	    le32_to_cpu(buf[3]) != info->ocon_num) {
+		printk(KERN_ERR "SELinux:  policydb table sizes (%d,%d) do "
+		       "not match mine (%d,%d)\n",
+		       le32_to_cpu(buf[2]), le32_to_cpu(buf[3]),
+		       info->sym_num, info->ocon_num);
+		goto bad;
+	}
+
+	for (i = 0; i < info->sym_num; i++) {
+		rc = next_entry(buf, fp, sizeof(u32)*2);
+		if (rc)
+			goto bad;
+		nprim = le32_to_cpu(buf[0]);
+		nel = le32_to_cpu(buf[1]);
+		for (j = 0; j < nel; j++) {
+			rc = read_f[i](p, p->symtab[i].table, fp);
+			if (rc)
+				goto bad;
+		}
+
+		p->symtab[i].nprim = nprim;
+	}
+
+	rc = -EINVAL;
+	p->process_class = string_to_security_class(p, "process");
+	if (!p->process_class)
+		goto bad;
+
+	rc = avtab_read(&p->te_avtab, fp, p);
+	if (rc)
+		goto bad;
+
+	if (p->policyvers >= POLICYDB_VERSION_BOOL) {
+		rc = cond_read_list(p, fp);
+		if (rc)
+			goto bad;
+	}
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc)
+		goto bad;
+	nel = le32_to_cpu(buf[0]);
+	ltr = NULL;
+	for (i = 0; i < nel; i++) {
+		rc = -ENOMEM;
+		tr = kzalloc(sizeof(*tr), GFP_KERNEL);
+		if (!tr)
+			goto bad;
+		if (ltr)
+			ltr->next = tr;
+		else
+			p->role_tr = tr;
+		rc = next_entry(buf, fp, sizeof(u32)*3);
+		if (rc)
+			goto bad;
+
+		rc = -EINVAL;
+		tr->role = le32_to_cpu(buf[0]);
+		tr->type = le32_to_cpu(buf[1]);
+		tr->new_role = le32_to_cpu(buf[2]);
+		if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
+			rc = next_entry(buf, fp, sizeof(u32));
+			if (rc)
+				goto bad;
+			tr->tclass = le32_to_cpu(buf[0]);
+		} else {
+			tr->tclass = p->process_class;
+		}
+
+		if (!policydb_role_isvalid(p, tr->role) ||
+		    !policydb_type_isvalid(p, tr->type) ||
+		    !policydb_class_isvalid(p, tr->tclass) ||
+		    !policydb_role_isvalid(p, tr->new_role))
+			goto bad;
+		ltr = tr;
+	}
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc)
+		goto bad;
+	nel = le32_to_cpu(buf[0]);
+	lra = NULL;
+	for (i = 0; i < nel; i++) {
+		rc = -ENOMEM;
+		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
+		if (!ra)
+			goto bad;
+		if (lra)
+			lra->next = ra;
+		else
+			p->role_allow = ra;
+		rc = next_entry(buf, fp, sizeof(u32)*2);
+		if (rc)
+			goto bad;
+
+		rc = -EINVAL;
+		ra->role = le32_to_cpu(buf[0]);
+		ra->new_role = le32_to_cpu(buf[1]);
+		if (!policydb_role_isvalid(p, ra->role) ||
+		    !policydb_role_isvalid(p, ra->new_role))
+			goto bad;
+		lra = ra;
+	}
+
+	rc = filename_trans_read(p, fp);
+	if (rc)
+		goto bad;
+
+	rc = policydb_index(p);
+	if (rc)
+		goto bad;
+
+	rc = -EINVAL;
+	p->process_trans_perms = string_to_av_perm(p, p->process_class, "transition");
+	p->process_trans_perms |= string_to_av_perm(p, p->process_class, "dyntransition");
+	if (!p->process_trans_perms)
+		goto bad;
+
+	rc = ocontext_read(p, info, fp);
+	if (rc)
+		goto bad;
+
+	rc = genfs_read(p, fp);
+	if (rc)
+		goto bad;
+
+	rc = range_read(p, fp);
+	if (rc)
+		goto bad;
+
+	rc = -ENOMEM;
+	p->type_attr_map_array = flex_array_alloc(sizeof(struct ebitmap),
+						  p->p_types.nprim,
+						  GFP_KERNEL | __GFP_ZERO);
+	if (!p->type_attr_map_array)
+		goto bad;
+
+	/* preallocate so we don't have to worry about the put ever failing */
+	rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim,
+				 GFP_KERNEL | __GFP_ZERO);
+	if (rc)
+		goto bad;
+
+	for (i = 0; i < p->p_types.nprim; i++) {
+		struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
+
+		BUG_ON(!e);
+		ebitmap_init(e);
+		if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
+			rc = ebitmap_read(e, fp);
+			if (rc)
+				goto bad;
+		}
+		/* add the type itself as the degenerate case */
+		rc = ebitmap_set_bit(e, i, 1);
+		if (rc)
+			goto bad;
+	}
+
+	rc = policydb_bounds_sanity_check(p);
+	if (rc)
+		goto bad;
+
+	rc = 0;
+out:
+	return rc;
+bad:
+	policydb_destroy(p);
+	goto out;
+}
+
+/*
+ * Write a MLS level structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_level(struct mls_level *l, void *fp)
+{
+	__le32 buf[1];
+	int rc;
+
+	buf[0] = cpu_to_le32(l->sens);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	rc = ebitmap_write(&l->cat, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * Write a MLS range structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_range_helper(struct mls_range *r, void *fp)
+{
+	__le32 buf[3];
+	size_t items;
+	int rc, eq;
+
+	eq = mls_level_eq(&r->level[1], &r->level[0]);
+
+	if (eq)
+		items = 2;
+	else
+		items = 3;
+	buf[0] = cpu_to_le32(items-1);
+	buf[1] = cpu_to_le32(r->level[0].sens);
+	if (!eq)
+		buf[2] = cpu_to_le32(r->level[1].sens);
+
+	BUG_ON(items > ARRAY_SIZE(buf));
+
+	rc = put_entry(buf, sizeof(u32), items, fp);
+	if (rc)
+		return rc;
+
+	rc = ebitmap_write(&r->level[0].cat, fp);
+	if (rc)
+		return rc;
+	if (!eq) {
+		rc = ebitmap_write(&r->level[1].cat, fp);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int sens_write(void *vkey, void *datum, void *ptr)
+{
+	char *key = vkey;
+	struct level_datum *levdatum = datum;
+	struct policy_data *pd = ptr;
+	void *fp = pd->fp;
+	__le32 buf[2];
+	size_t len;
+	int rc;
+
+	len = strlen(key);
+	buf[0] = cpu_to_le32(len);
+	buf[1] = cpu_to_le32(levdatum->isalias);
+	rc = put_entry(buf, sizeof(u32), 2, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+
+	rc = mls_write_level(levdatum->level, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int cat_write(void *vkey, void *datum, void *ptr)
+{
+	char *key = vkey;
+	struct cat_datum *catdatum = datum;
+	struct policy_data *pd = ptr;
+	void *fp = pd->fp;
+	__le32 buf[3];
+	size_t len;
+	int rc;
+
+	len = strlen(key);
+	buf[0] = cpu_to_le32(len);
+	buf[1] = cpu_to_le32(catdatum->value);
+	buf[2] = cpu_to_le32(catdatum->isalias);
+	rc = put_entry(buf, sizeof(u32), 3, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
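+/*
+ * Write the role transitions: a count followed by
+ * (role, type, new_role) triples, with the target class appended from
+ * POLICYDB_VERSION_ROLETRANS onwards.
+ */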
+static int role_trans_write(struct policydb *p, void *fp)
+{
+	struct role_trans *r = p->role_tr;
+	struct role_trans *tr;
+	u32 buf[3];
+	size_t nel;
+	int rc;
+
+	nel = 0;
+	for (tr = r; tr; tr = tr->next)
+		nel++;
+	buf[0] = cpu_to_le32(nel);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+	for (tr = r; tr; tr = tr->next) {
+		buf[0] = cpu_to_le32(tr->role);
+		buf[1] = cpu_to_le32(tr->type);
+		buf[2] = cpu_to_le32(tr->new_role);
+		rc = put_entry(buf, sizeof(u32), 3, fp);
+		if (rc)
+			return rc;
+		if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
+			buf[0] = cpu_to_le32(tr->tclass);
+			rc = put_entry(buf, sizeof(u32), 1, fp);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int role_allow_write(struct role_allow *r, void *fp)
+{
+	struct role_allow *ra;
+	u32 buf[2];
+	size_t nel;
+	int rc;
+
+	nel = 0;
+	for (ra = r; ra; ra = ra->next)
+		nel++;
+	buf[0] = cpu_to_le32(nel);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+	for (ra = r; ra; ra = ra->next) {
+		buf[0] = cpu_to_le32(ra->role);
+		buf[1] = cpu_to_le32(ra->new_role);
+		rc = put_entry(buf, sizeof(u32), 2, fp);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/*
+ * Write a security context structure
+ * to a policydb binary representation file.
+ */
+static int context_write(struct policydb *p, struct context *c,
+			 void *fp)
+{
+	int rc;
+	__le32 buf[3];
+
+	buf[0] = cpu_to_le32(c->user);
+	buf[1] = cpu_to_le32(c->role);
+	buf[2] = cpu_to_le32(c->type);
+
+	rc = put_entry(buf, sizeof(u32), 3, fp);
+	if (rc)
+		return rc;
+
+	rc = mls_write_range_helper(&c->range, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * The following *_write functions are used to
+ * write the symbol data to a policy database
+ * binary representation file.
+ */
+
+static int perm_write(void *vkey, void *datum, void *fp)
+{
+	char *key = vkey;
+	struct perm_datum *perdatum = datum;
+	__le32 buf[2];
+	size_t len;
+	int rc;
+
+	len = strlen(key);
+	buf[0] = cpu_to_le32(len);
+	buf[1] = cpu_to_le32(perdatum->value);
+	rc = put_entry(buf, sizeof(u32), 2, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int common_write(void *vkey, void *datum, void *ptr)
+{
+	char *key = vkey;
+	struct common_datum *comdatum = datum;
+	struct policy_data *pd = ptr;
+	void *fp = pd->fp;
+	__le32 buf[4];
+	size_t len;
+	int rc;
+
+	len = strlen(key);
+	buf[0] = cpu_to_le32(len);
+	buf[1] = cpu_to_le32(comdatum->value);
+	buf[2] = cpu_to_le32(comdatum->permissions.nprim);
+	buf[3] = cpu_to_le32(comdatum->permissions.table->nel);
+	rc = put_entry(buf, sizeof(u32), 4, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+
+	rc = hashtab_map(comdatum->permissions.table, perm_write, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int type_set_write(struct type_set *t, void *fp)
+{
+	int rc;
+	__le32 buf[1];
+
+	if (ebitmap_write(&t->types, fp))
+		return -EINVAL;
+	if (ebitmap_write(&t->negset, fp))
+		return -EINVAL;
+
+	buf[0] = cpu_to_le32(t->flags);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return -EINVAL;
+
+	return 0;
+}
+
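+/*
+ * Write a constraint list; expressions are emitted in the same postfix
+ * order in which read_cons_helper() expects to find them.
+ */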
+static int write_cons_helper(struct policydb *p, struct constraint_node *node,
+			     void *fp)
+{
+	struct constraint_node *c;
+	struct constraint_expr *e;
+	__le32 buf[3];
+	u32 nel;
+	int rc;
+
+	for (c = node; c; c = c->next) {
+		nel = 0;
+		for (e = c->expr; e; e = e->next)
+			nel++;
+		buf[0] = cpu_to_le32(c->permissions);
+		buf[1] = cpu_to_le32(nel);
+		rc = put_entry(buf, sizeof(u32), 2, fp);
+		if (rc)
+			return rc;
+		for (e = c->expr; e; e = e->next) {
+			buf[0] = cpu_to_le32(e->expr_type);
+			buf[1] = cpu_to_le32(e->attr);
+			buf[2] = cpu_to_le32(e->op);
+			rc = put_entry(buf, sizeof(u32), 3, fp);
+			if (rc)
+				return rc;
+
+			switch (e->expr_type) {
+			case CEXPR_NAMES:
+				rc = ebitmap_write(&e->names, fp);
+				if (rc)
+					return rc;
+				if (p->policyvers >=
+				    POLICYDB_VERSION_CONSTRAINT_NAMES) {
+					rc = type_set_write(e->type_names, fp);
+					if (rc)
+						return rc;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int class_write(void *vkey, void *datum, void *ptr)
+{
+	char *key = vkey;
+	struct class_datum *cladatum = datum;
+	struct policy_data *pd = ptr;
+	void *fp = pd->fp;
+	struct policydb *p = pd->p;
+	struct constraint_node *c;
+	__le32 buf[6];
+	u32 ncons;
+	size_t len, len2;
+	int rc;
+
+	len = strlen(key);
+	if (cladatum->comkey)
+		len2 = strlen(cladatum->comkey);
+	else
+		len2 = 0;
+
+	ncons = 0;
+	for (c = cladatum->constraints; c; c = c->next)
+		ncons++;
+
+	buf[0] = cpu_to_le32(len);
+	buf[1] = cpu_to_le32(len2);
+	buf[2] = cpu_to_le32(cladatum->value);
+	buf[3] = cpu_to_le32(cladatum->permissions.nprim);
+	if (cladatum->permissions.table)
+		buf[4] = cpu_to_le32(cladatum->permissions.table->nel);
+	else
+		buf[4] = 0;
+	buf[5] = cpu_to_le32(ncons);
+	rc = put_entry(buf, sizeof(u32), 6, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+
+	if (cladatum->comkey) {
+		rc = put_entry(cladatum->comkey, 1, len2, fp);
+		if (rc)
+			return rc;
+	}
+
+	rc = hashtab_map(cladatum->permissions.table, perm_write, fp);
+	if (rc)
+		return rc;
+
+	rc = write_cons_helper(p, cladatum->constraints, fp);
+	if (rc)
+		return rc;
+
+	/* write out the validatetrans rule */
+	ncons = 0;
+	for (c = cladatum->validatetrans; c; c = c->next)
+		ncons++;
+
+	buf[0] = cpu_to_le32(ncons);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	rc = write_cons_helper(p, cladatum->validatetrans, fp);
+	if (rc)
+		return rc;
+
+	if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+		buf[0] = cpu_to_le32(cladatum->default_user);
+		buf[1] = cpu_to_le32(cladatum->default_role);
+		buf[2] = cpu_to_le32(cladatum->default_range);
+
+		rc = put_entry(buf, sizeof(u32), 3, fp);
+		if (rc)
+			return rc;
+	}
+
+	if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+		buf[0] = cpu_to_le32(cladatum->default_type);
+		rc = put_entry(buf, sizeof(u32), 1, fp);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int role_write(void *vkey, void *datum, void *ptr)
+{
+	char *key = vkey;
+	struct role_datum *role = datum;
+	struct policy_data *pd = ptr;
+	void *fp = pd->fp;
+	struct policydb *p = pd->p;
+	__le32 buf[3];
+	size_t items, len;
+	int rc;
+
+	len = strlen(key);
+	items = 0;
+	buf[items++] = cpu_to_le32(len);
+	buf[items++] = cpu_to_le32(role->value);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		buf[items++] = cpu_to_le32(role->bounds);
+
+	BUG_ON(items > ARRAY_SIZE(buf));
+
+	rc = put_entry(buf, sizeof(u32), items, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+
+	rc = ebitmap_write(&role->dominates, fp);
+	if (rc)
+		return rc;
+
+	rc = ebitmap_write(&role->types, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int type_write(void *vkey, void *datum, void *ptr)
+{
+	char *key = vkey;
+	struct type_datum *typdatum = datum;
+	struct policy_data *pd = ptr;
+	struct policydb *p = pd->p;
+	void *fp = pd->fp;
+	__le32 buf[4];
+	int rc;
+	size_t items, len;
+
+	len = strlen(key);
+	items = 0;
+	buf[items++] = cpu_to_le32(len);
+	buf[items++] = cpu_to_le32(typdatum->value);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
+		u32 properties = 0;
+
+		if (typdatum->primary)
+			properties |= TYPEDATUM_PROPERTY_PRIMARY;
+
+		if (typdatum->attribute)
+			properties |= TYPEDATUM_PROPERTY_ATTRIBUTE;
+
+		buf[items++] = cpu_to_le32(properties);
+		buf[items++] = cpu_to_le32(typdatum->bounds);
+	} else {
+		buf[items++] = cpu_to_le32(typdatum->primary);
+	}
+	BUG_ON(items > ARRAY_SIZE(buf));
+	rc = put_entry(buf, sizeof(u32), items, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int user_write(void *vkey, void *datum, void *ptr)
+{
+	char *key = vkey;
+	struct user_datum *usrdatum = datum;
+	struct policy_data *pd = ptr;
+	struct policydb *p = pd->p;
+	void *fp = pd->fp;
+	__le32 buf[3];
+	size_t items, len;
+	int rc;
+
+	len = strlen(key);
+	items = 0;
+	buf[items++] = cpu_to_le32(len);
+	buf[items++] = cpu_to_le32(usrdatum->value);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		buf[items++] = cpu_to_le32(usrdatum->bounds);
+	BUG_ON(items > ARRAY_SIZE(buf));
+	rc = put_entry(buf, sizeof(u32), items, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(key, 1, len, fp);
+	if (rc)
+		return rc;
+
+	rc = ebitmap_write(&usrdatum->roles, fp);
+	if (rc)
+		return rc;
+
+	rc = mls_write_range_helper(&usrdatum->range, fp);
+	if (rc)
+		return rc;
+
+	rc = mls_write_level(&usrdatum->dfltlevel, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int (*write_f[SYM_NUM]) (void *key, void *datum,
+				void *datap) =
+{
+	common_write,
+	class_write,
+	role_write,
+	type_write,
+	user_write,
+	cond_write_bool,
+	sens_write,
+	cat_write,
+};
+
+static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
+			  void *fp)
+{
+	unsigned int i, j;
+	int rc;
+	size_t nel, len;
+	__le32 buf[3];
+	u32 nodebuf[8];
+	struct ocontext *c;
+
+	for (i = 0; i < info->ocon_num; i++) {
+		nel = 0;
+		for (c = p->ocontexts[i]; c; c = c->next)
+			nel++;
+		buf[0] = cpu_to_le32(nel);
+		rc = put_entry(buf, sizeof(u32), 1, fp);
+		if (rc)
+			return rc;
+		for (c = p->ocontexts[i]; c; c = c->next) {
+			switch (i) {
+			case OCON_ISID:
+				buf[0] = cpu_to_le32(c->sid[0]);
+				rc = put_entry(buf, sizeof(u32), 1, fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[0], fp);
+				if (rc)
+					return rc;
+				break;
+			case OCON_FS:
+			case OCON_NETIF:
+				len = strlen(c->u.name);
+				buf[0] = cpu_to_le32(len);
+				rc = put_entry(buf, sizeof(u32), 1, fp);
+				if (rc)
+					return rc;
+				rc = put_entry(c->u.name, 1, len, fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[0], fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[1], fp);
+				if (rc)
+					return rc;
+				break;
+			case OCON_PORT:
+				buf[0] = cpu_to_le32(c->u.port.protocol);
+				buf[1] = cpu_to_le32(c->u.port.low_port);
+				buf[2] = cpu_to_le32(c->u.port.high_port);
+				rc = put_entry(buf, sizeof(u32), 3, fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[0], fp);
+				if (rc)
+					return rc;
+				break;
+			case OCON_NODE:
+				nodebuf[0] = c->u.node.addr; /* network order */
+				nodebuf[1] = c->u.node.mask; /* network order */
+				rc = put_entry(nodebuf, sizeof(u32), 2, fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[0], fp);
+				if (rc)
+					return rc;
+				break;
+			case OCON_FSUSE:
+				buf[0] = cpu_to_le32(c->v.behavior);
+				len = strlen(c->u.name);
+				buf[1] = cpu_to_le32(len);
+				rc = put_entry(buf, sizeof(u32), 2, fp);
+				if (rc)
+					return rc;
+				rc = put_entry(c->u.name, 1, len, fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[0], fp);
+				if (rc)
+					return rc;
+				break;
+			case OCON_NODE6:
+				for (j = 0; j < 4; j++)
+					nodebuf[j] = c->u.node6.addr[j]; /* network order */
+				for (j = 0; j < 4; j++)
+					nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */
+				rc = put_entry(nodebuf, sizeof(u32), 8, fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[0], fp);
+				if (rc)
+					return rc;
+				break;
+			}
+		}
+	}
+	return 0;
+}
+
+static int genfs_write(struct policydb *p, void *fp)
+{
+	struct genfs *genfs;
+	struct ocontext *c;
+	size_t len;
+	__le32 buf[1];
+	int rc;
+
+	len = 0;
+	for (genfs = p->genfs; genfs; genfs = genfs->next)
+		len++;
+	buf[0] = cpu_to_le32(len);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+	for (genfs = p->genfs; genfs; genfs = genfs->next) {
+		len = strlen(genfs->fstype);
+		buf[0] = cpu_to_le32(len);
+		rc = put_entry(buf, sizeof(u32), 1, fp);
+		if (rc)
+			return rc;
+		rc = put_entry(genfs->fstype, 1, len, fp);
+		if (rc)
+			return rc;
+		len = 0;
+		for (c = genfs->head; c; c = c->next)
+			len++;
+		buf[0] = cpu_to_le32(len);
+		rc = put_entry(buf, sizeof(u32), 1, fp);
+		if (rc)
+			return rc;
+		for (c = genfs->head; c; c = c->next) {
+			len = strlen(c->u.name);
+			buf[0] = cpu_to_le32(len);
+			rc = put_entry(buf, sizeof(u32), 1, fp);
+			if (rc)
+				return rc;
+			rc = put_entry(c->u.name, 1, len, fp);
+			if (rc)
+				return rc;
+			buf[0] = cpu_to_le32(c->v.sclass);
+			rc = put_entry(buf, sizeof(u32), 1, fp);
+			if (rc)
+				return rc;
+			rc = context_write(p, &c->context[0], fp);
+			if (rc)
+				return rc;
+		}
+	}
+	return 0;
+}
+
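+/* hashtab_map() callback: count the entries of a hash table into *ptr */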
+static int hashtab_cnt(void *key, void *data, void *ptr)
+{
+	int *cnt = ptr;
+
+	(*cnt)++;
+
+	return 0;
+}
+
+static int range_write_helper(void *key, void *data, void *ptr)
+{
+	__le32 buf[2];
+	struct range_trans *rt = key;
+	struct mls_range *r = data;
+	struct policy_data *pd = ptr;
+	void *fp = pd->fp;
+	struct policydb *p = pd->p;
+	int rc;
+
+	buf[0] = cpu_to_le32(rt->source_type);
+	buf[1] = cpu_to_le32(rt->target_type);
+	rc = put_entry(buf, sizeof(u32), 2, fp);
+	if (rc)
+		return rc;
+	if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
+		buf[0] = cpu_to_le32(rt->target_class);
+		rc = put_entry(buf, sizeof(u32), 1, fp);
+		if (rc)
+			return rc;
+	}
+	rc = mls_write_range_helper(r, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int range_write(struct policydb *p, void *fp)
+{
+	__le32 buf[1];
+	int rc, nel;
+	struct policy_data pd;
+
+	pd.p = p;
+	pd.fp = fp;
+
+	/* count the number of entries in the hashtab */
+	nel = 0;
+	rc = hashtab_map(p->range_tr, hashtab_cnt, &nel);
+	if (rc)
+		return rc;
+
+	buf[0] = cpu_to_le32(nel);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	/* actually write all of the entries */
+	rc = hashtab_map(p->range_tr, range_write_helper, &pd);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
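+/*
+ * hashtab_map() callback: write one filename transition rule as a
+ * length-prefixed name followed by (stype, ttype, tclass, otype).
+ */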
+static int filename_write_helper(void *key, void *data, void *ptr)
+{
+	__le32 buf[4];
+	struct filename_trans *ft = key;
+	struct filename_trans_datum *otype = data;
+	void *fp = ptr;
+	int rc;
+	u32 len;
+
+	len = strlen(ft->name);
+	buf[0] = cpu_to_le32(len);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	rc = put_entry(ft->name, sizeof(char), len, fp);
+	if (rc)
+		return rc;
+
+	buf[0] = cpu_to_le32(ft->stype);
+	buf[1] = cpu_to_le32(ft->ttype);
+	buf[2] = cpu_to_le32(ft->tclass);
+	buf[3] = cpu_to_le32(otype->otype);
+
+	rc = put_entry(buf, sizeof(u32), 4, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int filename_trans_write(struct policydb *p, void *fp)
+{
+	u32 nel;
+	__le32 buf[1];
+	int rc;
+
+	if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+		return 0;
+
+	nel = 0;
+	rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel);
+	if (rc)
+		return rc;
+
+	buf[0] = cpu_to_le32(nel);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	rc = hashtab_map(p->filename_trans, filename_write_helper, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * Write the configuration data in a policy database
+ * structure to a policy database binary representation
+ * file.
+ */
+int policydb_write(struct policydb *p, void *fp)
+{
+	unsigned int i, num_syms;
+	int rc;
+	__le32 buf[4];
+	u32 config;
+	size_t len;
+	struct policydb_compat_info *info;
+
+	/*
+	 * Refuse to write policy older than compressed avtab to
+	 * simplify the writer.  Other checks have been dropped as
+	 * well, since this is assumed throughout the writer code.
+	 * Be careful if you ever try to remove this restriction.
+	 */
+	if (p->policyvers < POLICYDB_VERSION_AVTAB) {
+		printk(KERN_ERR "SELinux: refusing to write policy version %d."
+		       "  Because it is less than version %d\n", p->policyvers,
+		       POLICYDB_VERSION_AVTAB);
+		return -EINVAL;
+	}
+
+	config = 0;
+	if (p->mls_enabled)
+		config |= POLICYDB_CONFIG_MLS;
+
+	if (p->reject_unknown)
+		config |= REJECT_UNKNOWN;
+	if (p->allow_unknown)
+		config |= ALLOW_UNKNOWN;
+
+	/* Write the magic number and string identifiers. */
+	buf[0] = cpu_to_le32(POLICYDB_MAGIC);
+	len = strlen(POLICYDB_STRING);
+	buf[1] = cpu_to_le32(len);
+	rc = put_entry(buf, sizeof(u32), 2, fp);
+	if (rc)
+		return rc;
+	rc = put_entry(POLICYDB_STRING, 1, len, fp);
+	if (rc)
+		return rc;
+
+	/* Write the version, config, and table sizes. */
+	info = policydb_lookup_compat(p->policyvers);
+	if (!info) {
+		printk(KERN_ERR "SELinux: compatibility lookup failed for policy "
+		    "version %d", p->policyvers);
+		return -EINVAL;
+	}
+
+	buf[0] = cpu_to_le32(p->policyvers);
+	buf[1] = cpu_to_le32(config);
+	buf[2] = cpu_to_le32(info->sym_num);
+	buf[3] = cpu_to_le32(info->ocon_num);
+
+	rc = put_entry(buf, sizeof(u32), 4, fp);
+	if (rc)
+		return rc;
+
+	if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+		rc = ebitmap_write(&p->policycaps, fp);
+		if (rc)
+			return rc;
+	}
+
+	if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+		rc = ebitmap_write(&p->permissive_map, fp);
+		if (rc)
+			return rc;
+	}
+
+	num_syms = info->sym_num;
+	for (i = 0; i < num_syms; i++) {
+		struct policy_data pd;
+
+		pd.fp = fp;
+		pd.p = p;
+
+		buf[0] = cpu_to_le32(p->symtab[i].nprim);
+		buf[1] = cpu_to_le32(p->symtab[i].table->nel);
+
+		rc = put_entry(buf, sizeof(u32), 2, fp);
+		if (rc)
+			return rc;
+		rc = hashtab_map(p->symtab[i].table, write_f[i], &pd);
+		if (rc)
+			return rc;
+	}
+
+	rc = avtab_write(p, &p->te_avtab, fp);
+	if (rc)
+		return rc;
+
+	rc = cond_write_list(p, p->cond_list, fp);
+	if (rc)
+		return rc;
+
+	rc = role_trans_write(p, fp);
+	if (rc)
+		return rc;
+
+	rc = role_allow_write(p->role_allow, fp);
+	if (rc)
+		return rc;
+
+	rc = filename_trans_write(p, fp);
+	if (rc)
+		return rc;
+
+	rc = ocontext_write(p, info, fp);
+	if (rc)
+		return rc;
+
+	rc = genfs_write(p, fp);
+	if (rc)
+		return rc;
+
+	rc = range_write(p, fp);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < p->p_types.nprim; i++) {
+		struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
+
+		BUG_ON(!e);
+		rc = ebitmap_write(e, fp);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
new file mode 100644
index 0000000..725d594
--- /dev/null
+++ b/security/selinux/ss/policydb.h
@@ -0,0 +1,370 @@
+/*
+ * A policy database (policydb) specifies the
+ * configuration data for the security policy.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ *	Support for enhanced MLS infrastructure.
+ *
+ * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ *	Added conditional policy language extensions
+ *
+ * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ */
+
+#ifndef _SS_POLICYDB_H_
+#define _SS_POLICYDB_H_
+
+#include <linux/flex_array.h>
+
+#include "symtab.h"
+#include "avtab.h"
+#include "sidtab.h"
+#include "ebitmap.h"
+#include "mls_types.h"
+#include "context.h"
+#include "constraint.h"
+
+/*
+ * A datum type is defined for each kind of symbol
+ * in the configuration data:  individual permissions,
+ * common prefixes for access vectors, classes,
+ * users, roles, types, sensitivities, categories, etc.
+ */
+
+/* Permission attributes */
+struct perm_datum {
+	u32 value;		/* permission bit + 1 */
+};
+
+/* Attributes of a common prefix for access vectors */
+struct common_datum {
+	u32 value;			/* internal common value */
+	struct symtab permissions;	/* common permissions */
+};
+
+/* Class attributes */
+struct class_datum {
+	u32 value;			/* class value */
+	char *comkey;			/* common name */
+	struct common_datum *comdatum;	/* common datum */
+	struct symtab permissions;	/* class-specific permission symbol table */
+	struct constraint_node *constraints;	/* constraints on class permissions */
+	struct constraint_node *validatetrans;	/* special transition rules */
+/* Options for how the user, role, and type of a new object should be decided */
+#define DEFAULT_SOURCE         1
+#define DEFAULT_TARGET         2
+	char default_user;
+	char default_role;
+	char default_type;
+/* Options for how the range of a new object should be decided */
+#define DEFAULT_SOURCE_LOW     1
+#define DEFAULT_SOURCE_HIGH    2
+#define DEFAULT_SOURCE_LOW_HIGH        3
+#define DEFAULT_TARGET_LOW     4
+#define DEFAULT_TARGET_HIGH    5
+#define DEFAULT_TARGET_LOW_HIGH        6
+	char default_range;
+};
+
+/* Role attributes */
+struct role_datum {
+	u32 value;			/* internal role value */
+	u32 bounds;			/* boundary of role */
+	struct ebitmap dominates;	/* set of roles dominated by this role */
+	struct ebitmap types;		/* set of authorized types for role */
+};
+
+struct role_trans {
+	u32 role;		/* current role */
+	u32 type;		/* program executable type, or new object type */
+	u32 tclass;		/* process class, or new object class */
+	u32 new_role;		/* new role */
+	struct role_trans *next;
+};
+
+struct filename_trans {
+	u32 stype;		/* current process */
+	u32 ttype;		/* parent dir context */
+	u16 tclass;		/* class of new object */
+	const char *name;	/* last path component */
+};
+
+struct filename_trans_datum {
+	u32 otype;		/* expected type of the new object */
+};
+
+struct role_allow {
+	u32 role;		/* current role */
+	u32 new_role;		/* new role */
+	struct role_allow *next;
+};
+
+/* Type attributes */
+struct type_datum {
+	u32 value;		/* internal type value */
+	u32 bounds;		/* boundary of type */
+	unsigned char primary;	/* primary name? */
+	unsigned char attribute;	/* is this type an attribute? */
+};
+
+/* User attributes */
+struct user_datum {
+	u32 value;			/* internal user value */
+	u32 bounds;			/* bounds of user */
+	struct ebitmap roles;		/* set of authorized roles for user */
+	struct mls_range range;		/* MLS range (min - max) for user */
+	struct mls_level dfltlevel;	/* default login MLS level for user */
+};
+
+/* Sensitivity attributes */
+struct level_datum {
+	struct mls_level *level;	/* sensitivity and associated categories */
+	unsigned char isalias;	/* is this sensitivity an alias for another? */
+};
+
+/* Category attributes */
+struct cat_datum {
+	u32 value;		/* internal category bit + 1 */
+	unsigned char isalias;  /* is this category an alias for another? */
+};
+
+struct range_trans {
+	u32 source_type;
+	u32 target_type;
+	u32 target_class;
+};
+
+/* Boolean data type */
+struct cond_bool_datum {
+	__u32 value;		/* internal type value */
+	int state;
+};
+
+struct cond_node;
+
+/*
+ * type set preserves data needed to determine constraint info from
+ * policy source. This is not used by the kernel policy but allows
+ * utilities such as audit2allow to determine constraint denials.
+ */
+struct type_set {
+	struct ebitmap types;
+	struct ebitmap negset;
+	u32 flags;
+};
+
+/*
+ * The configuration data includes security contexts for
+ * initial SIDs, unlabeled file systems, TCP and UDP port numbers,
+ * network interfaces, and nodes.  This structure stores the
+ * relevant data for one such entry.  Entries of the same kind
+ * (e.g. all initial SIDs) are linked together into a list.
+ */
+struct ocontext {
+	union {
+		char *name;	/* name of initial SID, fs, netif, fstype, path */
+		struct {
+			u8 protocol;
+			u16 low_port;
+			u16 high_port;
+		} port;		/* TCP or UDP port information */
+		struct {
+			u32 addr;
+			u32 mask;
+		} node;		/* node information */
+		struct {
+			u32 addr[4];
+			u32 mask[4];
+		} node6;        /* IPv6 node information */
+	} u;
+	union {
+		u32 sclass;  /* security class for genfs */
+		u32 behavior;  /* labeling behavior for fs_use */
+	} v;
+	struct context context[2];	/* security context(s) */
+	u32 sid[2];	/* SID(s) */
+	struct ocontext *next;
+};
+
+struct genfs {
+	char *fstype;
+	struct ocontext *head;
+	struct genfs *next;
+};
+
+/* symbol table array indices */
+#define SYM_COMMONS 0
+#define SYM_CLASSES 1
+#define SYM_ROLES   2
+#define SYM_TYPES   3
+#define SYM_USERS   4
+#define SYM_BOOLS   5
+#define SYM_LEVELS  6
+#define SYM_CATS    7
+#define SYM_NUM     8
+
+/* object context array indices */
+#define OCON_ISID  0	/* initial SIDs */
+#define OCON_FS    1	/* unlabeled file systems */
+#define OCON_PORT  2	/* TCP and UDP port numbers */
+#define OCON_NETIF 3	/* network interfaces */
+#define OCON_NODE  4	/* nodes */
+#define OCON_FSUSE 5	/* fs_use */
+#define OCON_NODE6 6	/* IPv6 nodes */
+#define OCON_NUM   7
+
+/* The policy database */
+struct policydb {
+	int mls_enabled;
+
+	/* symbol tables */
+	struct symtab symtab[SYM_NUM];
+#define p_commons symtab[SYM_COMMONS]
+#define p_classes symtab[SYM_CLASSES]
+#define p_roles symtab[SYM_ROLES]
+#define p_types symtab[SYM_TYPES]
+#define p_users symtab[SYM_USERS]
+#define p_bools symtab[SYM_BOOLS]
+#define p_levels symtab[SYM_LEVELS]
+#define p_cats symtab[SYM_CATS]
+
+	/* symbol names indexed by (value - 1) */
+	struct flex_array *sym_val_to_name[SYM_NUM];
+
+	/* class, role, and user attributes indexed by (value - 1) */
+	struct class_datum **class_val_to_struct;
+	struct role_datum **role_val_to_struct;
+	struct user_datum **user_val_to_struct;
+	struct flex_array *type_val_to_struct_array;
+
+	/* type enforcement access vectors and transitions */
+	struct avtab te_avtab;
+
+	/* role transitions */
+	struct role_trans *role_tr;
+
+	/* file transitions with the last path component */
+	/* quickly exclude lookups when parent ttype has no rules */
+	struct ebitmap filename_trans_ttypes;
+	/* actual set of filename_trans rules */
+	struct hashtab *filename_trans;
+
+	/* bools indexed by (value - 1) */
+	struct cond_bool_datum **bool_val_to_struct;
+	/* type enforcement conditional access vectors and transitions */
+	struct avtab te_cond_avtab;
+	/* linked list indexing te_cond_avtab by conditional */
+	struct cond_node *cond_list;
+
+	/* role allows */
+	struct role_allow *role_allow;
+
+	/* security contexts of initial SIDs, unlabeled file systems,
+	   TCP or UDP port numbers, network interfaces and nodes */
+	struct ocontext *ocontexts[OCON_NUM];
+
+	/* security contexts for files in filesystems that cannot support
+	   a persistent label mapping or use another
+	   fixed labeling behavior. */
+	struct genfs *genfs;
+
+	/* range transitions table (range_trans_key -> mls_range) */
+	struct hashtab *range_tr;
+
+	/* type -> attribute reverse mapping */
+	struct flex_array *type_attr_map_array;
+
+	struct ebitmap policycaps;
+
+	struct ebitmap permissive_map;
+
+	/* length of this policy when it was loaded */
+	size_t len;
+
+	unsigned int policyvers;
+
+	unsigned int reject_unknown : 1;
+	unsigned int allow_unknown : 1;
+
+	u16 process_class;
+	u32 process_trans_perms;
+};
+
+extern void policydb_destroy(struct policydb *p);
+extern int policydb_load_isids(struct policydb *p, struct sidtab *s);
+extern int policydb_context_isvalid(struct policydb *p, struct context *c);
+extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
+extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
+extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
+extern int policydb_read(struct policydb *p, void *fp);
+extern int policydb_write(struct policydb *p, void *fp);
+
+#define PERM_SYMTAB_SIZE 32
+
+#define POLICYDB_CONFIG_MLS    1
+
+/* the config flags related to unknown classes/perms are bits 2 and 3 */
+#define REJECT_UNKNOWN	0x00000002
+#define ALLOW_UNKNOWN	0x00000004
+
+#define OBJECT_R "object_r"
+#define OBJECT_R_VAL 1
+
+#define POLICYDB_MAGIC SELINUX_MAGIC
+#define POLICYDB_STRING "SE Linux"
+
+struct policy_file {
+	char *data;
+	size_t len;
+};
+
+struct policy_data {
+	struct policydb *p;
+	void *fp;
+};
+
+static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
+{
+	if (bytes > fp->len)
+		return -EINVAL;
+
+	memcpy(buf, fp->data, bytes);
+	fp->data += bytes;
+	fp->len -= bytes;
+	return 0;
+}
+
+static inline int put_entry(const void *buf, size_t bytes, int num, struct policy_file *fp)
+{
+	size_t len = bytes * num;
+
+	memcpy(fp->data, buf, len);
+	fp->data += len;
+	fp->len -= len;
+
+	return 0;
+}
+
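+/*
+ * Illustrative sketch (editor's note; buf, fp, rc and nprim are
+ * hypothetical locals) of how the helpers above are typically used
+ * when parsing a policy image.  Values are stored little-endian, so
+ * reads go through le32_to_cpu():
+ *
+ *	__le32 buf[1];
+ *	u32 nprim;
+ *	int rc;
+ *
+ *	rc = next_entry(buf, fp, sizeof(u32));
+ *	if (rc)
+ *		return rc;
+ *	nprim = le32_to_cpu(buf[0]);
+ */
+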
+static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr)
+{
+	struct flex_array *fa = p->sym_val_to_name[sym_num];
+
+	return flex_array_get_ptr(fa, element_nr);
+}
+
+extern u16 string_to_security_class(struct policydb *p, const char *name);
+extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
+
+#endif	/* _SS_POLICYDB_H_ */
+
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
new file mode 100644
index 0000000..ebb5eb3
--- /dev/null
+++ b/security/selinux/ss/services.c
@@ -0,0 +1,3452 @@
+/*
+ * Implementation of the security services.
+ *
+ * Authors : Stephen Smalley, <sds@epoch.ncsc.mil>
+ *	     James Morris <jmorris@redhat.com>
+ *
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ *	Support for enhanced MLS infrastructure.
+ *	Support for context based audit filters.
+ *
+ * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ *	Added conditional policy language extensions
+ *
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ *      Added support for NetLabel
+ *      Added support for the policy capability bitmap
+ *
+ * Updated: Chad Sellers <csellers@tresys.com>
+ *
+ *  Added validation of kernel classes and permissions
+ *
+ * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ *  Added support for bounds domain and audit messages on masked permissions
+ *
+ * Updated: Guido Trentalancia <guido@trentalancia.com>
+ *
+ *  Added support for runtime switching of the policy type
+ *
+ * Copyright (C) 2008, 2009 NEC Corporation
+ * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
+ * Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/sched.h>
+#include <linux/audit.h>
+#include <linux/mutex.h>
+#include <linux/selinux.h>
+#include <linux/flex_array.h>
+#include <linux/vmalloc.h>
+#include <net/netlabel.h>
+
+#include "flask.h"
+#include "avc.h"
+#include "avc_ss.h"
+#include "security.h"
+#include "context.h"
+#include "policydb.h"
+#include "sidtab.h"
+#include "services.h"
+#include "conditional.h"
+#include "mls.h"
+#include "objsec.h"
+#include "netlabel.h"
+#include "xfrm.h"
+#include "ebitmap.h"
+#include "audit.h"
+
+int selinux_policycap_netpeer;
+int selinux_policycap_openperm;
+int selinux_policycap_alwaysnetwork;
+
+static DEFINE_RWLOCK(policy_rwlock);
+
+static struct sidtab sidtab;
+struct policydb policydb;
+int ss_initialized;
+
+/*
+ * The largest sequence number that has been used when
+ * providing an access decision to the access vector cache.
+ * The sequence number only changes when a policy change
+ * occurs.
+ */
+static u32 latest_granting;
+
+/* Forward declaration. */
+static int context_struct_to_string(struct context *context, char **scontext,
+				    u32 *scontext_len);
+
+static void context_struct_compute_av(struct context *scontext,
+					struct context *tcontext,
+					u16 tclass,
+					struct av_decision *avd,
+					struct extended_perms *xperms);
+
+struct selinux_mapping {
+	u16 value; /* policy value */
+	unsigned num_perms;
+	u32 perms[sizeof(u32) * 8];
+};
+
+static struct selinux_mapping *current_mapping;
+static u16 current_mapping_size;
+
+static int selinux_set_mapping(struct policydb *pol,
+			       struct security_class_mapping *map,
+			       struct selinux_mapping **out_map_p,
+			       u16 *out_map_size)
+{
+	struct selinux_mapping *out_map = NULL;
+	size_t size = sizeof(struct selinux_mapping);
+	u16 i, j;
+	unsigned k;
+	bool print_unknown_handle = false;
+
+	/* Find number of classes in the input mapping */
+	if (!map)
+		return -EINVAL;
+	i = 0;
+	while (map[i].name)
+		i++;
+
+	/* Allocate space for the class records, plus one for class zero */
+	out_map = kcalloc(++i, size, GFP_ATOMIC);
+	if (!out_map)
+		return -ENOMEM;
+
+	/* Store the raw class and permission values */
+	j = 0;
+	while (map[j].name) {
+		struct security_class_mapping *p_in = map + (j++);
+		struct selinux_mapping *p_out = out_map + j;
+
+		/* An empty class string skips ahead */
+		if (!strcmp(p_in->name, "")) {
+			p_out->num_perms = 0;
+			continue;
+		}
+
+		p_out->value = string_to_security_class(pol, p_in->name);
+		if (!p_out->value) {
+			printk(KERN_INFO
+			       "SELinux:  Class %s not defined in policy.\n",
+			       p_in->name);
+			if (pol->reject_unknown)
+				goto err;
+			p_out->num_perms = 0;
+			print_unknown_handle = true;
+			continue;
+		}
+
+		k = 0;
+		while (p_in->perms && p_in->perms[k]) {
+			/* An empty permission string skips ahead */
+			if (!*p_in->perms[k]) {
+				k++;
+				continue;
+			}
+			p_out->perms[k] = string_to_av_perm(pol, p_out->value,
+							    p_in->perms[k]);
+			if (!p_out->perms[k]) {
+				printk(KERN_INFO
+				       "SELinux:  Permission %s in class %s not defined in policy.\n",
+				       p_in->perms[k], p_in->name);
+				if (pol->reject_unknown)
+					goto err;
+				print_unknown_handle = true;
+			}
+
+			k++;
+		}
+		p_out->num_perms = k;
+	}
+
+	if (print_unknown_handle)
+		printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
+		       pol->allow_unknown ? "allowed" : "denied");
+
+	*out_map_p = out_map;
+	*out_map_size = i;
+	return 0;
+err:
+	kfree(out_map);
+	return -EINVAL;
+}
+
+/*
+ * Get the real, policy-defined values from the mapped values
+ */
+
+static u16 unmap_class(u16 tclass)
+{
+	if (tclass < current_mapping_size)
+		return current_mapping[tclass].value;
+
+	return tclass;
+}
+
+/*
+ * Get kernel value for class from its policy value
+ */
+static u16 map_class(u16 pol_value)
+{
+	u16 i;
+
+	for (i = 1; i < current_mapping_size; i++) {
+		if (current_mapping[i].value == pol_value)
+			return i;
+	}
+
+	return SECCLASS_NULL;
+}
+
+static void map_decision(u16 tclass, struct av_decision *avd,
+			 int allow_unknown)
+{
+	if (tclass < current_mapping_size) {
+		unsigned i, n = current_mapping[tclass].num_perms;
+		u32 result;
+
+		for (i = 0, result = 0; i < n; i++) {
+			if (avd->allowed & current_mapping[tclass].perms[i])
+				result |= 1<<i;
+			if (allow_unknown && !current_mapping[tclass].perms[i])
+				result |= 1<<i;
+		}
+		avd->allowed = result;
+
+		for (i = 0, result = 0; i < n; i++)
+			if (avd->auditallow & current_mapping[tclass].perms[i])
+				result |= 1<<i;
+		avd->auditallow = result;
+
+		for (i = 0, result = 0; i < n; i++) {
+			if (avd->auditdeny & current_mapping[tclass].perms[i])
+				result |= 1<<i;
+			if (!allow_unknown && !current_mapping[tclass].perms[i])
+				result |= 1<<i;
+		}
+		/*
+		 * In case the kernel has a bug and requests a permission
+		 * between num_perms and the maximum permission number, we
+		 * should audit that denial
+		 */
+		for (; i < (sizeof(u32)*8); i++)
+			result |= 1<<i;
+		avd->auditdeny = result;
+	}
+}
+
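+/*
+ * Worked example (editor's note, values hypothetical): if
+ * current_mapping[tclass].perms[] holds { 0x4, 0x1 }, then policy bit
+ * 0x4 maps to kernel bit 0 and policy bit 0x1 to kernel bit 1, so a
+ * policy-side avd->allowed of 0x5 becomes 0x3 on the kernel side.
+ */
+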
+int security_mls_enabled(void)
+{
+	return policydb.mls_enabled;
+}
+
+/*
+ * Return the boolean value of a constraint expression
+ * when it is applied to the specified source and target
+ * security contexts.
+ *
+ * xcontext is a special beast...  It is used by the validatetrans rules
+ * only.  For these rules, scontext is the context before the transition,
+ * tcontext is the context after the transition, and xcontext is the context
+ * of the process performing the transition.  All other callers of
+ * constraint_expr_eval should pass in NULL for xcontext.
+ */
+static int constraint_expr_eval(struct context *scontext,
+				struct context *tcontext,
+				struct context *xcontext,
+				struct constraint_expr *cexpr)
+{
+	u32 val1, val2;
+	struct context *c;
+	struct role_datum *r1, *r2;
+	struct mls_level *l1, *l2;
+	struct constraint_expr *e;
+	int s[CEXPR_MAXDEPTH];
+	int sp = -1;
+
+	for (e = cexpr; e; e = e->next) {
+		switch (e->expr_type) {
+		case CEXPR_NOT:
+			BUG_ON(sp < 0);
+			s[sp] = !s[sp];
+			break;
+		case CEXPR_AND:
+			BUG_ON(sp < 1);
+			sp--;
+			s[sp] &= s[sp + 1];
+			break;
+		case CEXPR_OR:
+			BUG_ON(sp < 1);
+			sp--;
+			s[sp] |= s[sp + 1];
+			break;
+		case CEXPR_ATTR:
+			if (sp == (CEXPR_MAXDEPTH - 1))
+				return 0;
+			switch (e->attr) {
+			case CEXPR_USER:
+				val1 = scontext->user;
+				val2 = tcontext->user;
+				break;
+			case CEXPR_TYPE:
+				val1 = scontext->type;
+				val2 = tcontext->type;
+				break;
+			case CEXPR_ROLE:
+				val1 = scontext->role;
+				val2 = tcontext->role;
+				r1 = policydb.role_val_to_struct[val1 - 1];
+				r2 = policydb.role_val_to_struct[val2 - 1];
+				switch (e->op) {
+				case CEXPR_DOM:
+					s[++sp] = ebitmap_get_bit(&r1->dominates,
+								  val2 - 1);
+					continue;
+				case CEXPR_DOMBY:
+					s[++sp] = ebitmap_get_bit(&r2->dominates,
+								  val1 - 1);
+					continue;
+				case CEXPR_INCOMP:
+					s[++sp] = (!ebitmap_get_bit(&r1->dominates,
+								    val2 - 1) &&
+						   !ebitmap_get_bit(&r2->dominates,
+								    val1 - 1));
+					continue;
+				default:
+					break;
+				}
+				break;
+			case CEXPR_L1L2:
+				l1 = &(scontext->range.level[0]);
+				l2 = &(tcontext->range.level[0]);
+				goto mls_ops;
+			case CEXPR_L1H2:
+				l1 = &(scontext->range.level[0]);
+				l2 = &(tcontext->range.level[1]);
+				goto mls_ops;
+			case CEXPR_H1L2:
+				l1 = &(scontext->range.level[1]);
+				l2 = &(tcontext->range.level[0]);
+				goto mls_ops;
+			case CEXPR_H1H2:
+				l1 = &(scontext->range.level[1]);
+				l2 = &(tcontext->range.level[1]);
+				goto mls_ops;
+			case CEXPR_L1H1:
+				l1 = &(scontext->range.level[0]);
+				l2 = &(scontext->range.level[1]);
+				goto mls_ops;
+			case CEXPR_L2H2:
+				l1 = &(tcontext->range.level[0]);
+				l2 = &(tcontext->range.level[1]);
+				goto mls_ops;
+mls_ops:
+			switch (e->op) {
+			case CEXPR_EQ:
+				s[++sp] = mls_level_eq(l1, l2);
+				continue;
+			case CEXPR_NEQ:
+				s[++sp] = !mls_level_eq(l1, l2);
+				continue;
+			case CEXPR_DOM:
+				s[++sp] = mls_level_dom(l1, l2);
+				continue;
+			case CEXPR_DOMBY:
+				s[++sp] = mls_level_dom(l2, l1);
+				continue;
+			case CEXPR_INCOMP:
+				s[++sp] = mls_level_incomp(l2, l1);
+				continue;
+			default:
+				BUG();
+				return 0;
+			}
+			break;
+			default:
+				BUG();
+				return 0;
+			}
+
+			switch (e->op) {
+			case CEXPR_EQ:
+				s[++sp] = (val1 == val2);
+				break;
+			case CEXPR_NEQ:
+				s[++sp] = (val1 != val2);
+				break;
+			default:
+				BUG();
+				return 0;
+			}
+			break;
+		case CEXPR_NAMES:
+			if (sp == (CEXPR_MAXDEPTH-1))
+				return 0;
+			c = scontext;
+			if (e->attr & CEXPR_TARGET)
+				c = tcontext;
+			else if (e->attr & CEXPR_XTARGET) {
+				c = xcontext;
+				if (!c) {
+					BUG();
+					return 0;
+				}
+			}
+			if (e->attr & CEXPR_USER)
+				val1 = c->user;
+			else if (e->attr & CEXPR_ROLE)
+				val1 = c->role;
+			else if (e->attr & CEXPR_TYPE)
+				val1 = c->type;
+			else {
+				BUG();
+				return 0;
+			}
+
+			switch (e->op) {
+			case CEXPR_EQ:
+				s[++sp] = ebitmap_get_bit(&e->names, val1 - 1);
+				break;
+			case CEXPR_NEQ:
+				s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1);
+				break;
+			default:
+				BUG();
+				return 0;
+			}
+			break;
+		default:
+			BUG();
+			return 0;
+		}
+	}
+
+	BUG_ON(sp != 0);
+	return s[0];
+}
+
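+/*
+ * Editor's note: the expression list walked above is in postfix
+ * (reverse Polish) order.  A constraint like "u1 == u2 and r1 == r2"
+ * arrives as the node sequence [u1 == u2] [r1 == r2] [AND]: each
+ * comparison pushes a boolean onto s[], and CEXPR_AND pops two values
+ * and pushes their conjunction, leaving the final result in s[0].
+ */
+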
+/*
+ * security_dump_masked_av - dumps masked permissions during
+ * security_compute_av due to RBAC, MLS/Constraint and Type bounds.
+ */
+static int dump_masked_av_helper(void *k, void *d, void *args)
+{
+	struct perm_datum *pdatum = d;
+	char **permission_names = args;
+
+	BUG_ON(pdatum->value < 1 || pdatum->value > 32);
+
+	permission_names[pdatum->value - 1] = (char *)k;
+
+	return 0;
+}
+
+static void security_dump_masked_av(struct context *scontext,
+				    struct context *tcontext,
+				    u16 tclass,
+				    u32 permissions,
+				    const char *reason)
+{
+	struct common_datum *common_dat;
+	struct class_datum *tclass_dat;
+	struct audit_buffer *ab;
+	char *tclass_name;
+	char *scontext_name = NULL;
+	char *tcontext_name = NULL;
+	char *permission_names[32];
+	int index;
+	u32 length;
+	bool need_comma = false;
+
+	if (!permissions)
+		return;
+
+	tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1);
+	tclass_dat = policydb.class_val_to_struct[tclass - 1];
+	common_dat = tclass_dat->comdatum;
+
+	/* init permission_names */
+	if (common_dat &&
+	    hashtab_map(common_dat->permissions.table,
+			dump_masked_av_helper, permission_names) < 0)
+		goto out;
+
+	if (hashtab_map(tclass_dat->permissions.table,
+			dump_masked_av_helper, permission_names) < 0)
+		goto out;
+
+	/* get scontext/tcontext in text form */
+	if (context_struct_to_string(scontext,
+				     &scontext_name, &length) < 0)
+		goto out;
+
+	if (context_struct_to_string(tcontext,
+				     &tcontext_name, &length) < 0)
+		goto out;
+
+	/* audit a message */
+	ab = audit_log_start(current->audit_context,
+			     GFP_ATOMIC, AUDIT_SELINUX_ERR);
+	if (!ab)
+		goto out;
+
+	audit_log_format(ab, "op=security_compute_av reason=%s "
+			 "scontext=%s tcontext=%s tclass=%s perms=",
+			 reason, scontext_name, tcontext_name, tclass_name);
+
+	for (index = 0; index < 32; index++) {
+		u32 mask = (1 << index);
+
+		if ((mask & permissions) == 0)
+			continue;
+
+		audit_log_format(ab, "%s%s",
+				 need_comma ? "," : "",
+				 permission_names[index]
+				 ? permission_names[index] : "????");
+		need_comma = true;
+	}
+	audit_log_end(ab);
+out:
+	/* release scontext/tcontext */
+	kfree(tcontext_name);
+	kfree(scontext_name);
+
+	return;
+}
+
+/*
+ * type_attribute_bounds_av - drops permissions that violate
+ * a type bounds constraint.
+ */
+static void type_attribute_bounds_av(struct context *scontext,
+				     struct context *tcontext,
+				     u16 tclass,
+				     struct av_decision *avd)
+{
+	struct context lo_scontext;
+	struct context lo_tcontext;
+	struct av_decision lo_avd;
+	struct type_datum *source;
+	struct type_datum *target;
+	u32 masked = 0;
+
+	source = flex_array_get_ptr(policydb.type_val_to_struct_array,
+				    scontext->type - 1);
+	BUG_ON(!source);
+
+	target = flex_array_get_ptr(policydb.type_val_to_struct_array,
+				    tcontext->type - 1);
+	BUG_ON(!target);
+
+	if (source->bounds) {
+		memset(&lo_avd, 0, sizeof(lo_avd));
+
+		memcpy(&lo_scontext, scontext, sizeof(lo_scontext));
+		lo_scontext.type = source->bounds;
+
+		context_struct_compute_av(&lo_scontext,
+					  tcontext,
+					  tclass,
+					  &lo_avd,
+					  NULL);
+		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
+			return;		/* no masked permission */
+		masked = ~lo_avd.allowed & avd->allowed;
+	}
+
+	if (target->bounds) {
+		memset(&lo_avd, 0, sizeof(lo_avd));
+
+		memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext));
+		lo_tcontext.type = target->bounds;
+
+		context_struct_compute_av(scontext,
+					  &lo_tcontext,
+					  tclass,
+					  &lo_avd,
+					  NULL);
+		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
+			return;		/* no masked permission */
+		masked = ~lo_avd.allowed & avd->allowed;
+	}
+
+	if (source->bounds && target->bounds) {
+		memset(&lo_avd, 0, sizeof(lo_avd));
+		/*
+		 * lo_scontext and lo_tcontext are already
+		 * set up.
+		 */
+
+		context_struct_compute_av(&lo_scontext,
+					  &lo_tcontext,
+					  tclass,
+					  &lo_avd,
+					  NULL);
+		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
+			return;		/* no masked permission */
+		masked = ~lo_avd.allowed & avd->allowed;
+	}
+
+	if (masked) {
+		/* mask violated permissions */
+		avd->allowed &= ~masked;
+
+		/* audit masked permissions */
+		security_dump_masked_av(scontext, tcontext,
+					tclass, masked, "bounds");
+	}
+}
+
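+/*
+ * Editor's illustrative note: with a policy declaration such as
+ * "typebounds httpd_t httpd_child_t;" (names hypothetical), any
+ * permission allowed to httpd_child_t but not to its bounding parent
+ * httpd_t is stripped from avd->allowed by the function above and
+ * reported through security_dump_masked_av() with reason "bounds".
+ */
+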
+/*
+ * flag which drivers have permissions;
+ * only looking for ioctl-based extended permissions
+ */
+void services_compute_xperms_drivers(
+		struct extended_perms *xperms,
+		struct avtab_node *node)
+{
+	unsigned int i;
+
+	if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+		/* if one or more drivers have all permissions allowed */
+		for (i = 0; i < ARRAY_SIZE(xperms->drivers.p); i++)
+			xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i];
+	} else if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+		/* if allowing permissions within a driver */
+		security_xperm_set(xperms->drivers.p,
+					node->datum.u.xperms->driver);
+	}
+
+	/* If no ioctl commands are allowed, ignore auditallow and auditdeny */
+	if (node->key.specified & AVTAB_XPERMS_ALLOWED)
+		xperms->len = 1;
+}
+
+/*
+ * Compute access vectors and extended permissions based on a context
+ * structure pair for the permissions in a particular class.
+ */
+static void context_struct_compute_av(struct context *scontext,
+					struct context *tcontext,
+					u16 tclass,
+					struct av_decision *avd,
+					struct extended_perms *xperms)
+{
+	struct constraint_node *constraint;
+	struct role_allow *ra;
+	struct avtab_key avkey;
+	struct avtab_node *node;
+	struct class_datum *tclass_datum;
+	struct ebitmap *sattr, *tattr;
+	struct ebitmap_node *snode, *tnode;
+	unsigned int i, j;
+
+	avd->allowed = 0;
+	avd->auditallow = 0;
+	avd->auditdeny = 0xffffffff;
+	if (xperms) {
+		memset(&xperms->drivers, 0, sizeof(xperms->drivers));
+		xperms->len = 0;
+	}
+
+	if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "SELinux:  Invalid class %hu\n", tclass);
+		return;
+	}
+
+	tclass_datum = policydb.class_val_to_struct[tclass - 1];
+
+	/*
+	 * If a specific type enforcement rule was defined for
+	 * this permission check, then use it.
+	 */
+	avkey.target_class = tclass;
+	avkey.specified = AVTAB_AV | AVTAB_XPERMS;
+	sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1);
+	BUG_ON(!sattr);
+	tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1);
+	BUG_ON(!tattr);
+	ebitmap_for_each_positive_bit(sattr, snode, i) {
+		ebitmap_for_each_positive_bit(tattr, tnode, j) {
+			avkey.source_type = i + 1;
+			avkey.target_type = j + 1;
+			for (node = avtab_search_node(&policydb.te_avtab, &avkey);
+			     node;
+			     node = avtab_search_node_next(node, avkey.specified)) {
+				if (node->key.specified == AVTAB_ALLOWED)
+					avd->allowed |= node->datum.u.data;
+				else if (node->key.specified == AVTAB_AUDITALLOW)
+					avd->auditallow |= node->datum.u.data;
+				else if (node->key.specified == AVTAB_AUDITDENY)
+					avd->auditdeny &= node->datum.u.data;
+				else if (xperms && (node->key.specified & AVTAB_XPERMS))
+					services_compute_xperms_drivers(xperms, node);
+			}
+
+			/* Check conditional av table for additional permissions */
+			cond_compute_av(&policydb.te_cond_avtab, &avkey,
+					avd, xperms);
+
+		}
+	}
+
+	/*
+	 * Remove any permissions prohibited by a constraint (this includes
+	 * the MLS policy).
+	 */
+	constraint = tclass_datum->constraints;
+	while (constraint) {
+		if ((constraint->permissions & (avd->allowed)) &&
+		    !constraint_expr_eval(scontext, tcontext, NULL,
+					  constraint->expr)) {
+			avd->allowed &= ~(constraint->permissions);
+		}
+		constraint = constraint->next;
+	}
+
+	/*
+	 * If checking process transition permission and the
+	 * role is changing, then check the (current_role, new_role)
+	 * pair.
+	 */
+	if (tclass == policydb.process_class &&
+	    (avd->allowed & policydb.process_trans_perms) &&
+	    scontext->role != tcontext->role) {
+		for (ra = policydb.role_allow; ra; ra = ra->next) {
+			if (scontext->role == ra->role &&
+			    tcontext->role == ra->new_role)
+				break;
+		}
+		if (!ra)
+			avd->allowed &= ~policydb.process_trans_perms;
+	}
+
+	/*
+	 * If the given source and target types have boundary
+	 * constraint, lazy checks have to mask any violated
+	 * permission and notice it to userspace via audit.
+	 */
+	type_attribute_bounds_av(scontext, tcontext,
+				 tclass, avd);
+}
+
+static int security_validtrans_handle_fail(struct context *ocontext,
+					   struct context *ncontext,
+					   struct context *tcontext,
+					   u16 tclass)
+{
+	char *o = NULL, *n = NULL, *t = NULL;
+	u32 olen, nlen, tlen;
+
+	if (context_struct_to_string(ocontext, &o, &olen))
+		goto out;
+	if (context_struct_to_string(ncontext, &n, &nlen))
+		goto out;
+	if (context_struct_to_string(tcontext, &t, &tlen))
+		goto out;
+	audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
+		  "op=security_validate_transition seresult=denied"
+		  " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s",
+		  o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
+out:
+	kfree(o);
+	kfree(n);
+	kfree(t);
+
+	if (!selinux_enforcing)
+		return 0;
+	return -EPERM;
+}
+
+int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
+				 u16 orig_tclass)
+{
+	struct context *ocontext;
+	struct context *ncontext;
+	struct context *tcontext;
+	struct class_datum *tclass_datum;
+	struct constraint_node *constraint;
+	u16 tclass;
+	int rc = 0;
+
+	if (!ss_initialized)
+		return 0;
+
+	read_lock(&policy_rwlock);
+
+	tclass = unmap_class(orig_tclass);
+
+	if (!tclass || tclass > policydb.p_classes.nprim) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized class %d\n",
+			__func__, tclass);
+		rc = -EINVAL;
+		goto out;
+	}
+	tclass_datum = policydb.class_val_to_struct[tclass - 1];
+
+	ocontext = sidtab_search(&sidtab, oldsid);
+	if (!ocontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+			__func__, oldsid);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	ncontext = sidtab_search(&sidtab, newsid);
+	if (!ncontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+			__func__, newsid);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	tcontext = sidtab_search(&sidtab, tasksid);
+	if (!tcontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+			__func__, tasksid);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	constraint = tclass_datum->validatetrans;
+	while (constraint) {
+		if (!constraint_expr_eval(ocontext, ncontext, tcontext,
+					  constraint->expr)) {
+			rc = security_validtrans_handle_fail(ocontext, ncontext,
+							     tcontext, tclass);
+			goto out;
+		}
+		constraint = constraint->next;
+	}
+
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+/*
+ * security_bounded_transition - check whether the given
+ * transition is directed to a bounded type or not.
+ * It returns 0 if @new_sid is bounded by @old_sid.
+ * Otherwise, it returns an error code.
+ *
+ * @old_sid : current security identifier
+ * @new_sid : destination security identifier
+ */
+int security_bounded_transition(u32 old_sid, u32 new_sid)
+{
+	struct context *old_context, *new_context;
+	struct type_datum *type;
+	int index;
+	int rc;
+
+	read_lock(&policy_rwlock);
+
+	rc = -EINVAL;
+	old_context = sidtab_search(&sidtab, old_sid);
+	if (!old_context) {
+		printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
+		       __func__, old_sid);
+		goto out;
+	}
+
+	rc = -EINVAL;
+	new_context = sidtab_search(&sidtab, new_sid);
+	if (!new_context) {
+		printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
+		       __func__, new_sid);
+		goto out;
+	}
+
+	rc = 0;
+	/* type/domain unchanged */
+	if (old_context->type == new_context->type)
+		goto out;
+
+	index = new_context->type;
+	while (true) {
+		type = flex_array_get_ptr(policydb.type_val_to_struct_array,
+					  index - 1);
+		BUG_ON(!type);
+
+		/* not bounded anymore */
+		rc = -EPERM;
+		if (!type->bounds)
+			break;
+
+		/* @new_sid is bounded by @old_sid */
+		rc = 0;
+		if (type->bounds == old_context->type)
+			break;
+
+		index = type->bounds;
+	}
+
+	if (rc) {
+		char *old_name = NULL;
+		char *new_name = NULL;
+		u32 length;
+
+		if (!context_struct_to_string(old_context,
+					      &old_name, &length) &&
+		    !context_struct_to_string(new_context,
+					      &new_name, &length)) {
+			audit_log(current->audit_context,
+				  GFP_ATOMIC, AUDIT_SELINUX_ERR,
+				  "op=security_bounded_transition "
+				  "seresult=denied "
+				  "oldcontext=%s newcontext=%s",
+				  old_name, new_name);
+		}
+		kfree(new_name);
+		kfree(old_name);
+	}
+out:
+	read_unlock(&policy_rwlock);
+
+	return rc;
+}
+
+static void avd_init(struct av_decision *avd)
+{
+	avd->allowed = 0;
+	avd->auditallow = 0;
+	avd->auditdeny = 0xffffffff;
+	avd->seqno = latest_granting;
+	avd->flags = 0;
+}
+
+void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+					struct avtab_node *node)
+{
+	unsigned int i;
+
+	if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+		if (xpermd->driver != node->datum.u.xperms->driver)
+			return;
+	} else if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+		if (!security_xperm_test(node->datum.u.xperms->perms.p,
+					xpermd->driver))
+			return;
+	} else {
+		BUG();
+	}
+
+	if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
+		xpermd->used |= XPERMS_ALLOWED;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->allowed->p, 0xff,
+					sizeof(xpermd->allowed->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->allowed->p); i++)
+				xpermd->allowed->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else if (node->key.specified == AVTAB_XPERMS_AUDITALLOW) {
+		xpermd->used |= XPERMS_AUDITALLOW;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->auditallow->p, 0xff,
+					sizeof(xpermd->auditallow->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->auditallow->p); i++)
+				xpermd->auditallow->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else if (node->key.specified == AVTAB_XPERMS_DONTAUDIT) {
+		xpermd->used |= XPERMS_DONTAUDIT;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->dontaudit->p, 0xff,
+					sizeof(xpermd->dontaudit->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->dontaudit->p); i++)
+				xpermd->dontaudit->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else {
+		BUG();
+	}
+}
+
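+/*
+ * Editor's illustrative note: an ioctl whitelisting rule such as
+ * "allowxperm domain_t dev_t:chr_file ioctl 0x8910;" (names
+ * hypothetical) is stored with driver 0x89 (the high byte) and
+ * function 0x10 (the low byte).  An IOCTLFUNCTION node above only
+ * contributes its bitmap when xpermd->driver matches its driver byte,
+ * while an IOCTLDRIVER node covers every function of each driver set
+ * in its bitmap.
+ */
+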
+void security_compute_xperms_decision(u32 ssid,
+				u32 tsid,
+				u16 orig_tclass,
+				u8 driver,
+				struct extended_perms_decision *xpermd)
+{
+	u16 tclass;
+	struct context *scontext, *tcontext;
+	struct avtab_key avkey;
+	struct avtab_node *node;
+	struct ebitmap *sattr, *tattr;
+	struct ebitmap_node *snode, *tnode;
+	unsigned int i, j;
+
+	xpermd->driver = driver;
+	xpermd->used = 0;
+	memset(xpermd->allowed->p, 0, sizeof(xpermd->allowed->p));
+	memset(xpermd->auditallow->p, 0, sizeof(xpermd->auditallow->p));
+	memset(xpermd->dontaudit->p, 0, sizeof(xpermd->dontaudit->p));
+
+	read_lock(&policy_rwlock);
+	if (!ss_initialized)
+		goto allow;
+
+	scontext = sidtab_search(&sidtab, ssid);
+	if (!scontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, ssid);
+		goto out;
+	}
+
+	tcontext = sidtab_search(&sidtab, tsid);
+	if (!tcontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, tsid);
+		goto out;
+	}
+
+	tclass = unmap_class(orig_tclass);
+	if (unlikely(orig_tclass && !tclass)) {
+		if (policydb.allow_unknown)
+			goto allow;
+		goto out;
+	}
+
+	if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
+		pr_warn_ratelimited("SELinux:  Invalid class %hu\n", tclass);
+		goto out;
+	}
+
+	avkey.target_class = tclass;
+	avkey.specified = AVTAB_XPERMS;
+	sattr = flex_array_get(policydb.type_attr_map_array,
+				scontext->type - 1);
+	BUG_ON(!sattr);
+	tattr = flex_array_get(policydb.type_attr_map_array,
+				tcontext->type - 1);
+	BUG_ON(!tattr);
+	ebitmap_for_each_positive_bit(sattr, snode, i) {
+		ebitmap_for_each_positive_bit(tattr, tnode, j) {
+			avkey.source_type = i + 1;
+			avkey.target_type = j + 1;
+			for (node = avtab_search_node(&policydb.te_avtab, &avkey);
+			     node;
+			     node = avtab_search_node_next(node, avkey.specified))
+				services_compute_xperms_decision(xpermd, node);
+
+			cond_compute_xperms(&policydb.te_cond_avtab,
+						&avkey, xpermd);
+		}
+	}
+out:
+	read_unlock(&policy_rwlock);
+	return;
+allow:
+	memset(xpermd->allowed->p, 0xff, sizeof(xpermd->allowed->p));
+	goto out;
+}
+
+/**
+ * security_compute_av - Compute access vector decisions.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @orig_tclass: target security class
+ * @avd: access vector decisions
+ * @xperms: extended permissions
+ *
+ * Compute a set of access vector decisions based on the
+ * SID pair (@ssid, @tsid) for the permissions in @tclass.
+ */
+void security_compute_av(u32 ssid,
+			 u32 tsid,
+			 u16 orig_tclass,
+			 struct av_decision *avd,
+			 struct extended_perms *xperms)
+{
+	u16 tclass;
+	struct context *scontext = NULL, *tcontext = NULL;
+
+	read_lock(&policy_rwlock);
+	avd_init(avd);
+	xperms->len = 0;
+	if (!ss_initialized)
+		goto allow;
+
+	scontext = sidtab_search(&sidtab, ssid);
+	if (!scontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, ssid);
+		goto out;
+	}
+
+	/* permissive domain? */
+	if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+		avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+	tcontext = sidtab_search(&sidtab, tsid);
+	if (!tcontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, tsid);
+		goto out;
+	}
+
+	tclass = unmap_class(orig_tclass);
+	if (unlikely(orig_tclass && !tclass)) {
+		if (policydb.allow_unknown)
+			goto allow;
+		goto out;
+	}
+	context_struct_compute_av(scontext, tcontext, tclass, avd, xperms);
+	map_decision(orig_tclass, avd, policydb.allow_unknown);
+out:
+	read_unlock(&policy_rwlock);
+	return;
+allow:
+	avd->allowed = 0xffffffff;
+	goto out;
+}
+
+void security_compute_av_user(u32 ssid,
+			      u32 tsid,
+			      u16 tclass,
+			      struct av_decision *avd)
+{
+	struct context *scontext = NULL, *tcontext = NULL;
+
+	read_lock(&policy_rwlock);
+	avd_init(avd);
+	if (!ss_initialized)
+		goto allow;
+
+	scontext = sidtab_search(&sidtab, ssid);
+	if (!scontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, ssid);
+		goto out;
+	}
+
+	/* permissive domain? */
+	if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+		avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+	tcontext = sidtab_search(&sidtab, tsid);
+	if (!tcontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, tsid);
+		goto out;
+	}
+
+	if (unlikely(!tclass)) {
+		if (policydb.allow_unknown)
+			goto allow;
+		goto out;
+	}
+
+	context_struct_compute_av(scontext, tcontext, tclass, avd, NULL);
+ out:
+	read_unlock(&policy_rwlock);
+	return;
+allow:
+	avd->allowed = 0xffffffff;
+	goto out;
+}
+
+/*
+ * Write the security context string representation of
+ * the context structure `context' into a dynamically
+ * allocated string of the correct size.  Set `*scontext'
+ * to point to this string and set `*scontext_len' to
+ * the length of the string.
+ */
+static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len)
+{
+	char *scontextp;
+
+	if (scontext)
+		*scontext = NULL;
+	*scontext_len = 0;
+
+	if (context->len) {
+		*scontext_len = context->len;
+		if (scontext) {
+			*scontext = kstrdup(context->str, GFP_ATOMIC);
+			if (!(*scontext))
+				return -ENOMEM;
+		}
+		return 0;
+	}
+
+	/* Compute the size of the context. */
+	*scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1;
+	*scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1;
+	*scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1;
+	*scontext_len += mls_compute_context_len(context);
+
+	if (!scontext)
+		return 0;
+
+	/* Allocate space for the context; caller must free this space. */
+	scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
+	if (!scontextp)
+		return -ENOMEM;
+	*scontext = scontextp;
+
+	/*
+	 * Copy the user name, role name and type name into the context.
+	 */
+	scontextp += sprintf(scontextp, "%s:%s:%s",
+		sym_name(&policydb, SYM_USERS, context->user - 1),
+		sym_name(&policydb, SYM_ROLES, context->role - 1),
+		sym_name(&policydb, SYM_TYPES, context->type - 1));
+
+	mls_sid_to_context(context, &scontextp);
+
+	*scontextp = 0;
+
+	return 0;
+}
+
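+/*
+ * Editor's illustrative note: for a context whose user, role and type
+ * resolve to "system_u", "object_r" and "etc_t", and whose MLS level
+ * renders as "s0", the function above produces the familiar string
+ * "system_u:object_r:etc_t:s0".
+ */
+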
+#include "initial_sid_to_string.h"
+
+const char *security_get_initial_sid_context(u32 sid)
+{
+	if (unlikely(sid > SECINITSID_NUM))
+		return NULL;
+	return initial_sid_to_string[sid];
+}
+
+static int security_sid_to_context_core(u32 sid, char **scontext,
+					u32 *scontext_len, int force)
+{
+	struct context *context;
+	int rc = 0;
+
+	if (scontext)
+		*scontext = NULL;
+	*scontext_len  = 0;
+
+	if (!ss_initialized) {
+		if (sid <= SECINITSID_NUM) {
+			char *scontextp;
+
+			*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
+			if (!scontext)
+				goto out;
+			scontextp = kmemdup(initial_sid_to_string[sid],
+					    *scontext_len, GFP_ATOMIC);
+			if (!scontextp) {
+				rc = -ENOMEM;
+				goto out;
+			}
+			*scontext = scontextp;
+			goto out;
+		}
+		printk(KERN_ERR "SELinux: %s:  called before initial "
+		       "load_policy on unknown SID %d\n", __func__, sid);
+		rc = -EINVAL;
+		goto out;
+	}
+	read_lock(&policy_rwlock);
+	if (force)
+		context = sidtab_search_force(&sidtab, sid);
+	else
+		context = sidtab_search(&sidtab, sid);
+	if (!context) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+			__func__, sid);
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+	rc = context_struct_to_string(context, scontext, scontext_len);
+out_unlock:
+	read_unlock(&policy_rwlock);
+out:
+	return rc;
+
+}
+
+/**
+ * security_sid_to_context - Obtain a context for a given SID.
+ * @sid: security identifier, SID
+ * @scontext: security context
+ * @scontext_len: length in bytes
+ *
+ * Write the string representation of the context associated with @sid
+ * into a dynamically allocated string of the correct size.  Set @scontext
+ * to point to this string and set @scontext_len to the length of the string.
+ */
+int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len)
+{
+	return security_sid_to_context_core(sid, scontext, scontext_len, 0);
+}
+
+int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len)
+{
+	return security_sid_to_context_core(sid, scontext, scontext_len, 1);
+}
+
+/*
+ * Caveat:  Mutates scontext.
+ */
+static int string_to_context_struct(struct policydb *pol,
+				    struct sidtab *sidtabp,
+				    char *scontext,
+				    u32 scontext_len,
+				    struct context *ctx,
+				    u32 def_sid)
+{
+	struct role_datum *role;
+	struct type_datum *typdatum;
+	struct user_datum *usrdatum;
+	char *scontextp, *p, oldc;
+	int rc = 0;
+
+	context_init(ctx);
+
+	/* Parse the security context. */
+
+	rc = -EINVAL;
+	scontextp = (char *) scontext;
+
+	/* Extract the user. */
+	p = scontextp;
+	while (*p && *p != ':')
+		p++;
+
+	if (*p == 0)
+		goto out;
+
+	*p++ = 0;
+
+	usrdatum = hashtab_search(pol->p_users.table, scontextp);
+	if (!usrdatum)
+		goto out;
+
+	ctx->user = usrdatum->value;
+
+	/* Extract role. */
+	scontextp = p;
+	while (*p && *p != ':')
+		p++;
+
+	if (*p == 0)
+		goto out;
+
+	*p++ = 0;
+
+	role = hashtab_search(pol->p_roles.table, scontextp);
+	if (!role)
+		goto out;
+	ctx->role = role->value;
+
+	/* Extract type. */
+	scontextp = p;
+	while (*p && *p != ':')
+		p++;
+	oldc = *p;
+	*p++ = 0;
+
+	typdatum = hashtab_search(pol->p_types.table, scontextp);
+	if (!typdatum || typdatum->attribute)
+		goto out;
+
+	ctx->type = typdatum->value;
+
+	rc = mls_context_to_sid(pol, oldc, &p, ctx, sidtabp, def_sid);
+	if (rc)
+		goto out;
+
+	rc = -EINVAL;
+	if ((p - scontext) < scontext_len)
+		goto out;
+
+	/* Check the validity of the new context. */
+	if (!policydb_context_isvalid(pol, ctx))
+		goto out;
+	rc = 0;
+out:
+	if (rc)
+		context_destroy(ctx);
+	return rc;
+}
+
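+/*
+ * Editor's illustrative note: string_to_context_struct() parses the
+ * inverse of the format above, consuming "system_u:object_r:etc_t:s0"
+ * in place by NUL-terminating each field as it goes (hence the
+ * "mutates scontext" caveat), with the remainder after the type handed
+ * to mls_context_to_sid() for the "s0" level.
+ */
+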
+static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
+					u32 *sid, u32 def_sid, gfp_t gfp_flags,
+					int force)
+{
+	char *scontext2, *str = NULL;
+	struct context context;
+	int rc = 0;
+
+	/* An empty security context is never valid. */
+	if (!scontext_len)
+		return -EINVAL;
+
+	if (!ss_initialized) {
+		int i;
+
+		for (i = 1; i < SECINITSID_NUM; i++) {
+			if (!strcmp(initial_sid_to_string[i], scontext)) {
+				*sid = i;
+				return 0;
+			}
+		}
+		*sid = SECINITSID_KERNEL;
+		return 0;
+	}
+	*sid = SECSID_NULL;
+
+	/* Copy the string so that we can modify the copy as we parse it. */
+	scontext2 = kmalloc(scontext_len + 1, gfp_flags);
+	if (!scontext2)
+		return -ENOMEM;
+	memcpy(scontext2, scontext, scontext_len);
+	scontext2[scontext_len] = 0;
+
+	if (force) {
+		/* Save another copy for storing in uninterpreted form */
+		rc = -ENOMEM;
+		str = kstrdup(scontext2, gfp_flags);
+		if (!str)
+			goto out;
+	}
+
+	read_lock(&policy_rwlock);
+	rc = string_to_context_struct(&policydb, &sidtab, scontext2,
+				      scontext_len, &context, def_sid);
+	if (rc == -EINVAL && force) {
+		context.str = str;
+		context.len = scontext_len;
+		str = NULL;
+	} else if (rc)
+		goto out_unlock;
+	rc = sidtab_context_to_sid(&sidtab, &context, sid);
+	context_destroy(&context);
+out_unlock:
+	read_unlock(&policy_rwlock);
+out:
+	kfree(scontext2);
+	kfree(str);
+	return rc;
+}
+
+/**
+ * security_context_to_sid - Obtain a SID for a given security context.
+ * @scontext: security context
+ * @scontext_len: length in bytes
+ * @sid: security identifier, SID
+ * @gfp: context for the allocation
+ *
+ * Obtains a SID associated with the security context that
+ * has the string representation specified by @scontext.
+ * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
+ * memory is available, or 0 on success.
+ */
+int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
+			    gfp_t gfp)
+{
+	return security_context_to_sid_core(scontext, scontext_len,
+					    sid, SECSID_NULL, gfp, 0);
+}
+
+int security_context_str_to_sid(const char *scontext, u32 *sid, gfp_t gfp)
+{
+	return security_context_to_sid(scontext, strlen(scontext), sid, gfp);
+}
+
+/**
+ * security_context_to_sid_default - Obtain a SID for a given security context,
+ * falling back to specified default if needed.
+ *
+ * @scontext: security context
+ * @scontext_len: length in bytes
+ * @sid: security identifier, SID
+ * @def_sid: default SID to assign on error
+ *
+ * Obtains a SID associated with the security context that
+ * has the string representation specified by @scontext.
+ * The default SID is passed to the MLS layer to be used to allow
+ * kernel labeling of the MLS field if the MLS field is not present
+ * (for upgrading to MLS without full relabel).
+ * Implicitly forces adding of the context even if it cannot be mapped yet.
+ * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
+ * memory is available, or 0 on success.
+ */
+int security_context_to_sid_default(const char *scontext, u32 scontext_len,
+				    u32 *sid, u32 def_sid, gfp_t gfp_flags)
+{
+	return security_context_to_sid_core(scontext, scontext_len,
+					    sid, def_sid, gfp_flags, 1);
+}
+
+int security_context_to_sid_force(const char *scontext, u32 scontext_len,
+				  u32 *sid)
+{
+	return security_context_to_sid_core(scontext, scontext_len,
+					    sid, SECSID_NULL, GFP_KERNEL, 1);
+}
+
+static int compute_sid_handle_invalid_context(
+	struct context *scontext,
+	struct context *tcontext,
+	u16 tclass,
+	struct context *newcontext)
+{
+	char *s = NULL, *t = NULL, *n = NULL;
+	u32 slen, tlen, nlen;
+
+	if (context_struct_to_string(scontext, &s, &slen))
+		goto out;
+	if (context_struct_to_string(tcontext, &t, &tlen))
+		goto out;
+	if (context_struct_to_string(newcontext, &n, &nlen))
+		goto out;
+	audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
+		  "op=security_compute_sid invalid_context=%s"
+		  " scontext=%s"
+		  " tcontext=%s"
+		  " tclass=%s",
+		  n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
+out:
+	kfree(s);
+	kfree(t);
+	kfree(n);
+	if (!selinux_enforcing)
+		return 0;
+	return -EACCES;
+}
+
+static void filename_compute_type(struct policydb *p, struct context *newcontext,
+				  u32 stype, u32 ttype, u16 tclass,
+				  const char *objname)
+{
+	struct filename_trans ft;
+	struct filename_trans_datum *otype;
+
+	/*
+	 * Most filename trans rules are going to live in specific directories
+	 * like /dev or /var/run.  This bitmap lets us quickly skip the rule
+	 * search when no rules exist for a given ttype.
+	 */
+	if (!ebitmap_get_bit(&p->filename_trans_ttypes, ttype))
+		return;
+
+	ft.stype = stype;
+	ft.ttype = ttype;
+	ft.tclass = tclass;
+	ft.name = objname;
+
+	otype = hashtab_search(p->filename_trans, &ft);
+	if (otype)
+		newcontext->type = otype->otype;
+}
+
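+/*
+ * Editor's illustrative note: a named transition rule such as
+ * "type_transition sshd_t tmp_t : sock_file sshd_tmp_t "agent";"
+ * (names hypothetical) is matched above with stype=sshd_t,
+ * ttype=tmp_t, tclass=sock_file and name="agent"; on a hit, the new
+ * object's type becomes sshd_tmp_t.
+ */
+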
+static int security_compute_sid(u32 ssid,
+				u32 tsid,
+				u16 orig_tclass,
+				u32 specified,
+				const char *objname,
+				u32 *out_sid,
+				bool kern)
+{
+	struct class_datum *cladatum = NULL;
+	struct context *scontext = NULL, *tcontext = NULL, newcontext;
+	struct role_trans *roletr = NULL;
+	struct avtab_key avkey;
+	struct avtab_datum *avdatum;
+	struct avtab_node *node;
+	u16 tclass;
+	int rc = 0;
+	bool sock;
+
+	if (!ss_initialized) {
+		switch (orig_tclass) {
+		case SECCLASS_PROCESS: /* kernel value */
+			*out_sid = ssid;
+			break;
+		default:
+			*out_sid = tsid;
+			break;
+		}
+		goto out;
+	}
+
+	context_init(&newcontext);
+
+	read_lock(&policy_rwlock);
+
+	if (kern) {
+		tclass = unmap_class(orig_tclass);
+		sock = security_is_socket_class(orig_tclass);
+	} else {
+		tclass = orig_tclass;
+		sock = security_is_socket_class(map_class(tclass));
+	}
+
+	scontext = sidtab_search(&sidtab, ssid);
+	if (!scontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, ssid);
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+	tcontext = sidtab_search(&sidtab, tsid);
+	if (!tcontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, tsid);
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (tclass && tclass <= policydb.p_classes.nprim)
+		cladatum = policydb.class_val_to_struct[tclass - 1];
+
+	/* Set the user identity. */
+	switch (specified) {
+	case AVTAB_TRANSITION:
+	case AVTAB_CHANGE:
+		if (cladatum && cladatum->default_user == DEFAULT_TARGET) {
+			newcontext.user = tcontext->user;
+		} else {
+			/* notice this gets both DEFAULT_SOURCE and unset */
+			/* Use the process user identity. */
+			newcontext.user = scontext->user;
+		}
+		break;
+	case AVTAB_MEMBER:
+		/* Use the related object owner. */
+		newcontext.user = tcontext->user;
+		break;
+	}
+
+	/* Set the role to default values. */
+	if (cladatum && cladatum->default_role == DEFAULT_SOURCE) {
+		newcontext.role = scontext->role;
+	} else if (cladatum && cladatum->default_role == DEFAULT_TARGET) {
+		newcontext.role = tcontext->role;
+	} else {
+		if ((tclass == policydb.process_class) || (sock == true))
+			newcontext.role = scontext->role;
+		else
+			newcontext.role = OBJECT_R_VAL;
+	}
+
+	/* Set the type to default values. */
+	if (cladatum && cladatum->default_type == DEFAULT_SOURCE) {
+		newcontext.type = scontext->type;
+	} else if (cladatum && cladatum->default_type == DEFAULT_TARGET) {
+		newcontext.type = tcontext->type;
+	} else {
+		if ((tclass == policydb.process_class) || (sock == true)) {
+			/* Use the type of process. */
+			newcontext.type = scontext->type;
+		} else {
+			/* Use the type of the related object. */
+			newcontext.type = tcontext->type;
+		}
+	}
+
+	/* Look for a type transition/member/change rule. */
+	avkey.source_type = scontext->type;
+	avkey.target_type = tcontext->type;
+	avkey.target_class = tclass;
+	avkey.specified = specified;
+	avdatum = avtab_search(&policydb.te_avtab, &avkey);
+
+	/* If no permanent rule, also check for enabled conditional rules */
+	if (!avdatum) {
+		node = avtab_search_node(&policydb.te_cond_avtab, &avkey);
+		for (; node; node = avtab_search_node_next(node, specified)) {
+			if (node->key.specified & AVTAB_ENABLED) {
+				avdatum = &node->datum;
+				break;
+			}
+		}
+	}
+
+	if (avdatum) {
+		/* Use the type from the type transition/member/change rule. */
+		newcontext.type = avdatum->u.data;
+	}
+
+	/* if we have an objname, this is a filename transition check, so check those rules */
+	if (objname)
+		filename_compute_type(&policydb, &newcontext, scontext->type,
+				      tcontext->type, tclass, objname);
+
+	/* Check for class-specific changes. */
+	if (specified & AVTAB_TRANSITION) {
+		/* Look for a role transition rule. */
+		for (roletr = policydb.role_tr; roletr; roletr = roletr->next) {
+			if ((roletr->role == scontext->role) &&
+			    (roletr->type == tcontext->type) &&
+			    (roletr->tclass == tclass)) {
+				/* Use the role transition rule. */
+				newcontext.role = roletr->new_role;
+				break;
+			}
+		}
+	}
+
+	/* Set the MLS attributes.
+	   This is done last because it may allocate memory. */
+	rc = mls_compute_sid(scontext, tcontext, tclass, specified,
+			     &newcontext, sock);
+	if (rc)
+		goto out_unlock;
+
+	/* Check the validity of the context. */
+	if (!policydb_context_isvalid(&policydb, &newcontext)) {
+		rc = compute_sid_handle_invalid_context(scontext,
+							tcontext,
+							tclass,
+							&newcontext);
+		if (rc)
+			goto out_unlock;
+	}
+	/* Obtain the sid for the context. */
+	rc = sidtab_context_to_sid(&sidtab, &newcontext, out_sid);
+out_unlock:
+	read_unlock(&policy_rwlock);
+	context_destroy(&newcontext);
+out:
+	return rc;
+}
+
+/**
+ * security_transition_sid - Compute the SID for a new subject/object.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @qstr: last path component, used for filename transitions
+ * @out_sid: security identifier for new subject/object
+ *
+ * Compute a SID to use for labeling a new subject or object in the
+ * class @tclass based on a SID pair (@ssid, @tsid).
+ * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
+ * if insufficient memory is available, or %0 if the new SID was
+ * computed successfully.
+ */
+int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
+			    const struct qstr *qstr, u32 *out_sid)
+{
+	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
+				    qstr ? qstr->name : NULL, out_sid, true);
+}
+
+int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
+				 const char *objname, u32 *out_sid)
+{
+	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
+				    objname, out_sid, false);
+}
+
+/**
+ * security_member_sid - Compute the SID for member selection.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @out_sid: security identifier for selected member
+ *
+ * Compute a SID to use when selecting a member of a polyinstantiated
+ * object of class @tclass based on a SID pair (@ssid, @tsid).
+ * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
+ * if insufficient memory is available, or %0 if the SID was
+ * computed successfully.
+ */
+int security_member_sid(u32 ssid,
+			u32 tsid,
+			u16 tclass,
+			u32 *out_sid)
+{
+	return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, NULL,
+				    out_sid, false);
+}
+
+/**
+ * security_change_sid - Compute the SID for object relabeling.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @out_sid: security identifier for the relabeled object
+ *
+ * Compute a SID to use for relabeling an object of class @tclass
+ * based on a SID pair (@ssid, @tsid).
+ * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
+ * if insufficient memory is available, or %0 if the SID was
+ * computed successfully.
+ */
+int security_change_sid(u32 ssid,
+			u32 tsid,
+			u16 tclass,
+			u32 *out_sid)
+{
+	return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, NULL,
+				    out_sid, false);
+}
+
+/* Clone the SID into the new SID table. */
+static int clone_sid(u32 sid,
+		     struct context *context,
+		     void *arg)
+{
+	struct sidtab *s = arg;
+
+	if (sid > SECINITSID_NUM)
+		return sidtab_insert(s, sid, context);
+	else
+		return 0;
+}
+
+static inline int convert_context_handle_invalid_context(struct context *context)
+{
+	char *s;
+	u32 len;
+
+	if (selinux_enforcing)
+		return -EINVAL;
+
+	if (!context_struct_to_string(context, &s, &len)) {
+		printk(KERN_WARNING "SELinux:  Context %s would be invalid if enforcing\n", s);
+		kfree(s);
+	}
+	return 0;
+}
+
+struct convert_context_args {
+	struct policydb *oldp;
+	struct policydb *newp;
+};
+
+/*
+ * Convert the values in the security context
+ * structure `c' from the values specified
+ * in the policy `p->oldp' to the values specified
+ * in the policy `p->newp'.  Verify that the
+ * context is valid under the new policy.
+ */
+static int convert_context(u32 key,
+			   struct context *c,
+			   void *p)
+{
+	struct convert_context_args *args;
+	struct context oldc;
+	struct ocontext *oc;
+	struct mls_range *range;
+	struct role_datum *role;
+	struct type_datum *typdatum;
+	struct user_datum *usrdatum;
+	char *s;
+	u32 len;
+	int rc = 0;
+
+	if (key <= SECINITSID_NUM)
+		goto out;
+
+	args = p;
+
+	if (c->str) {
+		struct context ctx;
+
+		rc = -ENOMEM;
+		s = kstrdup(c->str, GFP_KERNEL);
+		if (!s)
+			goto out;
+
+		rc = string_to_context_struct(args->newp, NULL, s,
+					      c->len, &ctx, SECSID_NULL);
+		kfree(s);
+		if (!rc) {
+			printk(KERN_INFO "SELinux:  Context %s became valid (mapped).\n",
+			       c->str);
+			/* Replace string with mapped representation. */
+			kfree(c->str);
+			memcpy(c, &ctx, sizeof(*c));
+			goto out;
+		} else if (rc == -EINVAL) {
+			/* Retain string representation for later mapping. */
+			rc = 0;
+			goto out;
+		} else {
+			/* Other error condition, e.g. ENOMEM. */
+			printk(KERN_ERR "SELinux:   Unable to map context %s, rc = %d.\n",
+			       c->str, -rc);
+			goto out;
+		}
+	}
+
+	rc = context_cpy(&oldc, c);
+	if (rc)
+		goto out;
+
+	/* Convert the user. */
+	rc = -EINVAL;
+	usrdatum = hashtab_search(args->newp->p_users.table,
+				  sym_name(args->oldp, SYM_USERS, c->user - 1));
+	if (!usrdatum)
+		goto bad;
+	c->user = usrdatum->value;
+
+	/* Convert the role. */
+	rc = -EINVAL;
+	role = hashtab_search(args->newp->p_roles.table,
+			      sym_name(args->oldp, SYM_ROLES, c->role - 1));
+	if (!role)
+		goto bad;
+	c->role = role->value;
+
+	/* Convert the type. */
+	rc = -EINVAL;
+	typdatum = hashtab_search(args->newp->p_types.table,
+				  sym_name(args->oldp, SYM_TYPES, c->type - 1));
+	if (!typdatum)
+		goto bad;
+	c->type = typdatum->value;
+
+	/* Convert the MLS fields if dealing with MLS policies */
+	if (args->oldp->mls_enabled && args->newp->mls_enabled) {
+		rc = mls_convert_context(args->oldp, args->newp, c);
+		if (rc)
+			goto bad;
+	} else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
+		/*
+		 * Switching between MLS and non-MLS policy:
+		 * free any storage used by the MLS fields in the
+		 * context for all existing entries in the sidtab.
+		 */
+		mls_context_destroy(c);
+	} else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
+		/*
+		 * Switching between non-MLS and MLS policy:
+		 * ensure that the MLS fields of the context for all
+		 * existing entries in the sidtab are filled in with a
+		 * suitable default value, likely taken from one of the
+		 * initial SIDs.
+		 */
+		oc = args->newp->ocontexts[OCON_ISID];
+		while (oc && oc->sid[0] != SECINITSID_UNLABELED)
+			oc = oc->next;
+		rc = -EINVAL;
+		if (!oc) {
+			printk(KERN_ERR "SELinux:  unable to look up"
+				" the initial SIDs list\n");
+			goto bad;
+		}
+		range = &oc->context[0].range;
+		rc = mls_range_set(c, range);
+		if (rc)
+			goto bad;
+	}
+
+	/* Check the validity of the new context. */
+	if (!policydb_context_isvalid(args->newp, c)) {
+		rc = convert_context_handle_invalid_context(&oldc);
+		if (rc)
+			goto bad;
+	}
+
+	context_destroy(&oldc);
+
+	rc = 0;
+out:
+	return rc;
+bad:
+	/* Map old representation to string and save it. */
+	rc = context_struct_to_string(&oldc, &s, &len);
+	if (rc)
+		return rc;
+	context_destroy(&oldc);
+	context_destroy(c);
+	c->str = s;
+	c->len = len;
+	printk(KERN_INFO "SELinux:  Context %s became invalid (unmapped).\n",
+	       c->str);
+	rc = 0;
+	goto out;
+}
+
+static void security_load_policycaps(void)
+{
+	selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps,
+						  POLICYDB_CAPABILITY_NETPEER);
+	selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
+						  POLICYDB_CAPABILITY_OPENPERM);
+	selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
+						  POLICYDB_CAPABILITY_ALWAYSNETWORK);
+}
+
+static int security_preserve_bools(struct policydb *p);
+
+/**
+ * security_load_policy - Load a security policy configuration.
+ * @data: binary policy data
+ * @len: length of data in bytes
+ *
+ * Load a new set of security policy configuration data,
+ * validate it and convert the SID table as necessary.
+ * This function will flush the access vector cache after
+ * loading the new policy.
+ */
+int security_load_policy(void *data, size_t len)
+{
+	struct policydb *oldpolicydb, *newpolicydb;
+	struct sidtab oldsidtab, newsidtab;
+	struct selinux_mapping *oldmap, *map = NULL;
+	struct convert_context_args args;
+	u32 seqno;
+	u16 map_size;
+	int rc = 0;
+	struct policy_file file = { data, len }, *fp = &file;
+
+	oldpolicydb = kzalloc(2 * sizeof(*oldpolicydb), GFP_KERNEL);
+	if (!oldpolicydb) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	newpolicydb = oldpolicydb + 1;
+
+	if (!ss_initialized) {
+		avtab_cache_init();
+		rc = policydb_read(&policydb, fp);
+		if (rc) {
+			avtab_cache_destroy();
+			goto out;
+		}
+
+		policydb.len = len;
+		rc = selinux_set_mapping(&policydb, secclass_map,
+					 &current_mapping,
+					 &current_mapping_size);
+		if (rc) {
+			policydb_destroy(&policydb);
+			avtab_cache_destroy();
+			goto out;
+		}
+
+		rc = policydb_load_isids(&policydb, &sidtab);
+		if (rc) {
+			policydb_destroy(&policydb);
+			avtab_cache_destroy();
+			goto out;
+		}
+
+		security_load_policycaps();
+		ss_initialized = 1;
+		seqno = ++latest_granting;
+		selinux_complete_init();
+		avc_ss_reset(seqno);
+		selnl_notify_policyload(seqno);
+		selinux_status_update_policyload(seqno);
+		selinux_netlbl_cache_invalidate();
+		selinux_xfrm_notify_policyload();
+		goto out;
+	}
+
+#if 0
+	sidtab_hash_eval(&sidtab, "sids");
+#endif
+
+	rc = policydb_read(newpolicydb, fp);
+	if (rc)
+		goto out;
+
+	newpolicydb->len = len;
+	/* If switching between different policy types, log MLS status */
+	if (policydb.mls_enabled && !newpolicydb->mls_enabled)
+		printk(KERN_INFO "SELinux: Disabling MLS support...\n");
+	else if (!policydb.mls_enabled && newpolicydb->mls_enabled)
+		printk(KERN_INFO "SELinux: Enabling MLS support...\n");
+
+	rc = policydb_load_isids(newpolicydb, &newsidtab);
+	if (rc) {
+		printk(KERN_ERR "SELinux:  unable to load the initial SIDs\n");
+		policydb_destroy(newpolicydb);
+		goto out;
+	}
+
+	rc = selinux_set_mapping(newpolicydb, secclass_map, &map, &map_size);
+	if (rc)
+		goto err;
+
+	rc = security_preserve_bools(newpolicydb);
+	if (rc) {
+		printk(KERN_ERR "SELinux:  unable to preserve booleans\n");
+		goto err;
+	}
+
+	/* Clone the SID table. */
+	sidtab_shutdown(&sidtab);
+
+	rc = sidtab_map(&sidtab, clone_sid, &newsidtab);
+	if (rc)
+		goto err;
+
+	/*
+	 * Convert the internal representations of contexts
+	 * in the new SID table.
+	 */
+	args.oldp = &policydb;
+	args.newp = newpolicydb;
+	rc = sidtab_map(&newsidtab, convert_context, &args);
+	if (rc) {
+		printk(KERN_ERR "SELinux:  unable to convert the internal"
+			" representation of contexts in the new SID"
+			" table\n");
+		goto err;
+	}
+
+	/* Save the old policydb and SID table to free later. */
+	memcpy(oldpolicydb, &policydb, sizeof(policydb));
+	sidtab_set(&oldsidtab, &sidtab);
+
+	/* Install the new policydb and SID table. */
+	write_lock_irq(&policy_rwlock);
+	memcpy(&policydb, newpolicydb, sizeof(policydb));
+	sidtab_set(&sidtab, &newsidtab);
+	security_load_policycaps();
+	oldmap = current_mapping;
+	current_mapping = map;
+	current_mapping_size = map_size;
+	seqno = ++latest_granting;
+	write_unlock_irq(&policy_rwlock);
+
+	/* Free the old policydb and SID table. */
+	policydb_destroy(oldpolicydb);
+	sidtab_destroy(&oldsidtab);
+	kfree(oldmap);
+
+	avc_ss_reset(seqno);
+	selnl_notify_policyload(seqno);
+	selinux_status_update_policyload(seqno);
+	selinux_netlbl_cache_invalidate();
+	selinux_xfrm_notify_policyload();
+
+	rc = 0;
+	goto out;
+
+err:
+	kfree(map);
+	sidtab_destroy(&newsidtab);
+	policydb_destroy(newpolicydb);
+
+out:
+	kfree(oldpolicydb);
+	return rc;
+}
+
+size_t security_policydb_len(void)
+{
+	size_t len;
+
+	read_lock(&policy_rwlock);
+	len = policydb.len;
+	read_unlock(&policy_rwlock);
+
+	return len;
+}
+
+/**
+ * security_port_sid - Obtain the SID for a port.
+ * @protocol: protocol number
+ * @port: port number
+ * @out_sid: security identifier
+ */
+int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
+{
+	struct ocontext *c;
+	int rc = 0;
+
+	read_lock(&policy_rwlock);
+
+	c = policydb.ocontexts[OCON_PORT];
+	while (c) {
+		if (c->u.port.protocol == protocol &&
+		    c->u.port.low_port <= port &&
+		    c->u.port.high_port >= port)
+			break;
+		c = c->next;
+	}
+
+	if (c) {
+		if (!c->sid[0]) {
+			rc = sidtab_context_to_sid(&sidtab,
+						   &c->context[0],
+						   &c->sid[0]);
+			if (rc)
+				goto out;
+		}
+		*out_sid = c->sid[0];
+	} else {
+		*out_sid = SECINITSID_PORT;
+	}
+
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
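+
+/*
+ * Hedged usage sketch (values are illustrative): labeling a TCP socket
+ * bound to port 80 might look like:
+ *
+ *	u32 port_sid;
+ *	int err = security_port_sid(IPPROTO_TCP, 80, &port_sid);
+ *
+ * If no ocontext port range covers the port, *out_sid falls back to
+ * SECINITSID_PORT.
+ */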
+
+/**
+ * security_netif_sid - Obtain the SID for a network interface.
+ * @name: interface name
+ * @if_sid: interface SID
+ */
+int security_netif_sid(char *name, u32 *if_sid)
+{
+	int rc = 0;
+	struct ocontext *c;
+
+	read_lock(&policy_rwlock);
+
+	c = policydb.ocontexts[OCON_NETIF];
+	while (c) {
+		if (strcmp(name, c->u.name) == 0)
+			break;
+		c = c->next;
+	}
+
+	if (c) {
+		if (!c->sid[0] || !c->sid[1]) {
+			rc = sidtab_context_to_sid(&sidtab,
+						  &c->context[0],
+						  &c->sid[0]);
+			if (rc)
+				goto out;
+			rc = sidtab_context_to_sid(&sidtab,
+						   &c->context[1],
+						   &c->sid[1]);
+			if (rc)
+				goto out;
+		}
+		*if_sid = c->sid[0];
+	} else
+		*if_sid = SECINITSID_NETIF;
+
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
+{
+	int i, fail = 0;
+
+	for (i = 0; i < 4; i++)
+		if (addr[i] != (input[i] & mask[i])) {
+			fail = 1;
+			break;
+		}
+
+	return !fail;
+}
+
+/**
+ * security_node_sid - Obtain the SID for a node (host).
+ * @domain: communication domain aka address family
+ * @addrp: address
+ * @addrlen: address length in bytes
+ * @out_sid: security identifier
+ */
+int security_node_sid(u16 domain,
+		      void *addrp,
+		      u32 addrlen,
+		      u32 *out_sid)
+{
+	int rc;
+	struct ocontext *c;
+
+	read_lock(&policy_rwlock);
+
+	switch (domain) {
+	case AF_INET: {
+		u32 addr;
+
+		rc = -EINVAL;
+		if (addrlen != sizeof(u32))
+			goto out;
+
+		addr = *((u32 *)addrp);
+
+		c = policydb.ocontexts[OCON_NODE];
+		while (c) {
+			if (c->u.node.addr == (addr & c->u.node.mask))
+				break;
+			c = c->next;
+		}
+		break;
+	}
+
+	case AF_INET6:
+		rc = -EINVAL;
+		if (addrlen != sizeof(u64) * 2)
+			goto out;
+		c = policydb.ocontexts[OCON_NODE6];
+		while (c) {
+			if (match_ipv6_addrmask(addrp, c->u.node6.addr,
+						c->u.node6.mask))
+				break;
+			c = c->next;
+		}
+		break;
+
+	default:
+		rc = 0;
+		*out_sid = SECINITSID_NODE;
+		goto out;
+	}
+
+	if (c) {
+		if (!c->sid[0]) {
+			rc = sidtab_context_to_sid(&sidtab,
+						   &c->context[0],
+						   &c->sid[0]);
+			if (rc)
+				goto out;
+		}
+		*out_sid = c->sid[0];
+	} else {
+		*out_sid = SECINITSID_NODE;
+	}
+
+	rc = 0;
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+#define SIDS_NEL 25
+
+/**
+ * security_get_user_sids - Obtain reachable SIDs for a user.
+ * @fromsid: starting SID
+ * @username: username
+ * @sids: array of reachable SIDs for user
+ * @nel: number of elements in @sids
+ *
+ * Generate the set of SIDs for legal security contexts
+ * for a given user that can be reached by @fromsid.
+ * Set *@sids to point to a dynamically allocated
+ * array containing the set of SIDs.  Set *@nel to the
+ * number of elements in the array.
+ */
+int security_get_user_sids(u32 fromsid,
+			   char *username,
+			   u32 **sids,
+			   u32 *nel)
+{
+	struct context *fromcon, usercon;
+	u32 *mysids = NULL, *mysids2, sid;
+	u32 mynel = 0, maxnel = SIDS_NEL;
+	struct user_datum *user;
+	struct role_datum *role;
+	struct ebitmap_node *rnode, *tnode;
+	int rc = 0, i, j;
+
+	*sids = NULL;
+	*nel = 0;
+
+	if (!ss_initialized)
+		goto out;
+
+	read_lock(&policy_rwlock);
+
+	context_init(&usercon);
+
+	rc = -EINVAL;
+	fromcon = sidtab_search(&sidtab, fromsid);
+	if (!fromcon)
+		goto out_unlock;
+
+	rc = -EINVAL;
+	user = hashtab_search(policydb.p_users.table, username);
+	if (!user)
+		goto out_unlock;
+
+	usercon.user = user->value;
+
+	rc = -ENOMEM;
+	mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
+	if (!mysids)
+		goto out_unlock;
+
+	ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
+		role = policydb.role_val_to_struct[i];
+		usercon.role = i + 1;
+		ebitmap_for_each_positive_bit(&role->types, tnode, j) {
+			usercon.type = j + 1;
+
+			if (mls_setup_user_range(fromcon, user, &usercon))
+				continue;
+
+			rc = sidtab_context_to_sid(&sidtab, &usercon, &sid);
+			if (rc)
+				goto out_unlock;
+			if (mynel < maxnel) {
+				mysids[mynel++] = sid;
+			} else {
+				rc = -ENOMEM;
+				maxnel += SIDS_NEL;
+				mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
+				if (!mysids2)
+					goto out_unlock;
+				memcpy(mysids2, mysids, mynel * sizeof(*mysids2));
+				kfree(mysids);
+				mysids = mysids2;
+				mysids[mynel++] = sid;
+			}
+		}
+	}
+	rc = 0;
+out_unlock:
+	read_unlock(&policy_rwlock);
+	if (rc || !mynel) {
+		kfree(mysids);
+		goto out;
+	}
+
+	rc = -ENOMEM;
+	mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
+	if (!mysids2) {
+		kfree(mysids);
+		goto out;
+	}
+	for (i = 0, j = 0; i < mynel; i++) {
+		struct av_decision dummy_avd;
+		rc = avc_has_perm_noaudit(fromsid, mysids[i],
+					  SECCLASS_PROCESS, /* kernel value */
+					  PROCESS__TRANSITION, AVC_STRICT,
+					  &dummy_avd);
+		if (!rc)
+			mysids2[j++] = mysids[i];
+		cond_resched();
+	}
+	rc = 0;
+	kfree(mysids);
+	*sids = mysids2;
+	*nel = j;
+out:
+	return rc;
+}
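+
+/*
+ * Caller-side sketch (the username is hypothetical): on success the
+ * array is allocated with kcalloc(..., GFP_KERNEL) and must be freed
+ * by the caller:
+ *
+ *	u32 *sids, nel;
+ *	if (!security_get_user_sids(fromsid, "user_u", &sids, &nel)) {
+ *		... iterate sids[0..nel-1] ...
+ *		kfree(sids);
+ *	}
+ */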
+
+/**
+ * __security_genfs_sid - Helper to obtain a SID for a file in a filesystem
+ * @fstype: filesystem type
+ * @path: path from root of mount
+ * @orig_sclass: file security class
+ * @sid: SID for path
+ *
+ * Obtain a SID to use for a file in a filesystem that
+ * cannot support xattr or use a fixed labeling behavior like
+ * transition SIDs or task SIDs.
+ *
+ * The caller must acquire the policy_rwlock before calling this function.
+ */
+static inline int __security_genfs_sid(const char *fstype,
+				       char *path,
+				       u16 orig_sclass,
+				       u32 *sid)
+{
+	int len;
+	u16 sclass;
+	struct genfs *genfs;
+	struct ocontext *c;
+	int rc, cmp = 0;
+
+	while (path[0] == '/' && path[1] == '/')
+		path++;
+
+	sclass = unmap_class(orig_sclass);
+	*sid = SECINITSID_UNLABELED;
+
+	for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
+		cmp = strcmp(fstype, genfs->fstype);
+		if (cmp <= 0)
+			break;
+	}
+
+	rc = -ENOENT;
+	if (!genfs || cmp)
+		goto out;
+
+	for (c = genfs->head; c; c = c->next) {
+		len = strlen(c->u.name);
+		if ((!c->v.sclass || sclass == c->v.sclass) &&
+		    (strncmp(c->u.name, path, len) == 0))
+			break;
+	}
+
+	rc = -ENOENT;
+	if (!c)
+		goto out;
+
+	if (!c->sid[0]) {
+		rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]);
+		if (rc)
+			goto out;
+	}
+
+	*sid = c->sid[0];
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * security_genfs_sid - Obtain a SID for a file in a filesystem
+ * @fstype: filesystem type
+ * @path: path from root of mount
+ * @orig_sclass: file security class
+ * @sid: SID for path
+ *
+ * Acquire policy_rwlock before calling __security_genfs_sid() and release
+ * it afterward.
+ */
+int security_genfs_sid(const char *fstype,
+		       char *path,
+		       u16 orig_sclass,
+		       u32 *sid)
+{
+	int retval;
+
+	read_lock(&policy_rwlock);
+	retval = __security_genfs_sid(fstype, path, orig_sclass, sid);
+	read_unlock(&policy_rwlock);
+	return retval;
+}
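+
+/*
+ * Illustrative sketch only: labeling a file on a genfs-labeled
+ * filesystem such as proc might look like:
+ *
+ *	u32 sid;
+ *	int err = security_genfs_sid("proc", "/net", SECCLASS_FILE, &sid);
+ *
+ * Policy load keeps the per-fstype path entries ordered so that the
+ * first prefix match found above is the most specific one.
+ */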
+
+/**
+ * security_fs_use - Determine how to handle labeling for a filesystem.
+ * @sb: superblock in question
+ */
+int security_fs_use(struct super_block *sb)
+{
+	int rc = 0;
+	struct ocontext *c;
+	struct superblock_security_struct *sbsec = sb->s_security;
+	const char *fstype = sb->s_type->name;
+
+	read_lock(&policy_rwlock);
+
+	c = policydb.ocontexts[OCON_FSUSE];
+	while (c) {
+		if (strcmp(fstype, c->u.name) == 0)
+			break;
+		c = c->next;
+	}
+
+	if (c) {
+		sbsec->behavior = c->v.behavior;
+		if (!c->sid[0]) {
+			rc = sidtab_context_to_sid(&sidtab, &c->context[0],
+						   &c->sid[0]);
+			if (rc)
+				goto out;
+		}
+		sbsec->sid = c->sid[0];
+	} else {
+		rc = __security_genfs_sid(fstype, "/", SECCLASS_DIR,
+					  &sbsec->sid);
+		if (rc) {
+			sbsec->behavior = SECURITY_FS_USE_NONE;
+			rc = 0;
+		} else {
+			sbsec->behavior = SECURITY_FS_USE_GENFS;
+		}
+	}
+
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+int security_get_bools(int *len, char ***names, int **values)
+{
+	int i, rc;
+
+	read_lock(&policy_rwlock);
+	*names = NULL;
+	*values = NULL;
+
+	rc = 0;
+	*len = policydb.p_bools.nprim;
+	if (!*len)
+		goto out;
+
+	rc = -ENOMEM;
+	*names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
+	if (!*names)
+		goto err;
+
+	rc = -ENOMEM;
+	*values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
+	if (!*values)
+		goto err;
+
+	for (i = 0; i < *len; i++) {
+		(*values)[i] = policydb.bool_val_to_struct[i]->state;
+
+		rc = -ENOMEM;
+		(*names)[i] = kstrdup(sym_name(&policydb, SYM_BOOLS, i), GFP_ATOMIC);
+		if (!(*names)[i])
+			goto err;
+	}
+	rc = 0;
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+err:
+	if (*names) {
+		for (i = 0; i < *len; i++)
+			kfree((*names)[i]);
+	}
+	kfree(*values);
+	goto out;
+}
+
+int security_set_bools(int len, int *values)
+{
+	int i, rc;
+	int lenp, seqno = 0;
+	struct cond_node *cur;
+
+	write_lock_irq(&policy_rwlock);
+
+	rc = -EFAULT;
+	lenp = policydb.p_bools.nprim;
+	if (len != lenp)
+		goto out;
+
+	for (i = 0; i < len; i++) {
+		if (!!values[i] != policydb.bool_val_to_struct[i]->state) {
+			audit_log(current->audit_context, GFP_ATOMIC,
+				AUDIT_MAC_CONFIG_CHANGE,
+				"bool=%s val=%d old_val=%d auid=%u ses=%u",
+				sym_name(&policydb, SYM_BOOLS, i),
+				!!values[i],
+				policydb.bool_val_to_struct[i]->state,
+				from_kuid(&init_user_ns, audit_get_loginuid(current)),
+				audit_get_sessionid(current));
+		}
+		if (values[i])
+			policydb.bool_val_to_struct[i]->state = 1;
+		else
+			policydb.bool_val_to_struct[i]->state = 0;
+	}
+
+	for (cur = policydb.cond_list; cur; cur = cur->next) {
+		rc = evaluate_cond_node(&policydb, cur);
+		if (rc)
+			goto out;
+	}
+
+	seqno = ++latest_granting;
+	rc = 0;
+out:
+	write_unlock_irq(&policy_rwlock);
+	if (!rc) {
+		avc_ss_reset(seqno);
+		selnl_notify_policyload(seqno);
+		selinux_status_update_policyload(seqno);
+		selinux_xfrm_notify_policyload();
+	}
+	return rc;
+}
+
+int security_get_bool_value(int bool)
+{
+	int rc;
+	int len;
+
+	read_lock(&policy_rwlock);
+
+	rc = -EFAULT;
+	len = policydb.p_bools.nprim;
+	if (bool >= len)
+		goto out;
+
+	rc = policydb.bool_val_to_struct[bool]->state;
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+static int security_preserve_bools(struct policydb *p)
+{
+	int rc, nbools = 0, *bvalues = NULL, i;
+	char **bnames = NULL;
+	struct cond_bool_datum *booldatum;
+	struct cond_node *cur;
+
+	rc = security_get_bools(&nbools, &bnames, &bvalues);
+	if (rc)
+		goto out;
+	for (i = 0; i < nbools; i++) {
+		booldatum = hashtab_search(p->p_bools.table, bnames[i]);
+		if (booldatum)
+			booldatum->state = bvalues[i];
+	}
+	for (cur = p->cond_list; cur; cur = cur->next) {
+		rc = evaluate_cond_node(p, cur);
+		if (rc)
+			goto out;
+	}
+
+out:
+	if (bnames) {
+		for (i = 0; i < nbools; i++)
+			kfree(bnames[i]);
+	}
+	kfree(bnames);
+	kfree(bvalues);
+	return rc;
+}
+
+/*
+ * security_sid_mls_copy() - computes a new sid based on the given
+ * sid and the mls portion of mls_sid.
+ */
+int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
+{
+	struct context *context1;
+	struct context *context2;
+	struct context newcon;
+	char *s;
+	u32 len;
+	int rc;
+
+	rc = 0;
+	if (!ss_initialized || !policydb.mls_enabled) {
+		*new_sid = sid;
+		goto out;
+	}
+
+	context_init(&newcon);
+
+	read_lock(&policy_rwlock);
+
+	rc = -EINVAL;
+	context1 = sidtab_search(&sidtab, sid);
+	if (!context1) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+			__func__, sid);
+		goto out_unlock;
+	}
+
+	rc = -EINVAL;
+	context2 = sidtab_search(&sidtab, mls_sid);
+	if (!context2) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+			__func__, mls_sid);
+		goto out_unlock;
+	}
+
+	newcon.user = context1->user;
+	newcon.role = context1->role;
+	newcon.type = context1->type;
+	rc = mls_context_cpy(&newcon, context2);
+	if (rc)
+		goto out_unlock;
+
+	/* Check the validity of the new context. */
+	if (!policydb_context_isvalid(&policydb, &newcon)) {
+		rc = convert_context_handle_invalid_context(&newcon);
+		if (rc) {
+			if (!context_struct_to_string(&newcon, &s, &len)) {
+				audit_log(current->audit_context,
+					  GFP_ATOMIC, AUDIT_SELINUX_ERR,
+					  "op=security_sid_mls_copy "
+					  "invalid_context=%s", s);
+				kfree(s);
+			}
+			goto out_unlock;
+		}
+	}
+
+	rc = sidtab_context_to_sid(&sidtab, &newcon, new_sid);
+out_unlock:
+	read_unlock(&policy_rwlock);
+	context_destroy(&newcon);
+out:
+	return rc;
+}
+
+/**
+ * security_net_peersid_resolve - Compare and resolve two network peer SIDs
+ * @nlbl_sid: NetLabel SID
+ * @nlbl_type: NetLabel labeling protocol type
+ * @xfrm_sid: XFRM SID
+ * @peer_sid: network peer SID
+ *
+ * Description:
+ * Compare the @nlbl_sid and @xfrm_sid values and if the two SIDs can be
+ * resolved into a single SID it is returned via @peer_sid and the function
+ * returns zero.  Otherwise @peer_sid is set to SECSID_NULL and the function
+ * returns a negative value.  A table summarizing the behavior is below:
+ *
+ *                                 | function return |    @peer_sid
+ *   ------------------------------+-----------------+-----------------
+ *   no peer labels                |        0        |    SECSID_NULL
+ *   single peer label             |        0        |    <peer_label>
+ *   multiple, consistent labels   |        0        |    <peer_label>
+ *   multiple, inconsistent labels |    -<errno>     |    SECSID_NULL
+ *
+ */
+int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
+				 u32 xfrm_sid,
+				 u32 *peer_sid)
+{
+	int rc;
+	struct context *nlbl_ctx;
+	struct context *xfrm_ctx;
+
+	*peer_sid = SECSID_NULL;
+
+	/* handle the common (which also happens to be the set of easy) cases
+	 * right away; these two if statements catch everything involving a
+	 * single or absent peer SID/label */
+	if (xfrm_sid == SECSID_NULL) {
+		*peer_sid = nlbl_sid;
+		return 0;
+	}
+	/* NOTE: an nlbl_type == NETLBL_NLTYPE_UNLABELED is a "fallback" label
+	 * and is treated as if nlbl_sid == SECSID_NULL when a XFRM SID/label
+	 * is present */
+	if (nlbl_sid == SECSID_NULL || nlbl_type == NETLBL_NLTYPE_UNLABELED) {
+		*peer_sid = xfrm_sid;
+		return 0;
+	}
+
+	/* we don't need to check ss_initialized here since the only way both
+	 * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
+	 * security server was initialized and ss_initialized was true */
+	if (!policydb.mls_enabled)
+		return 0;
+
+	read_lock(&policy_rwlock);
+
+	rc = -EINVAL;
+	nlbl_ctx = sidtab_search(&sidtab, nlbl_sid);
+	if (!nlbl_ctx) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, nlbl_sid);
+		goto out;
+	}
+	rc = -EINVAL;
+	xfrm_ctx = sidtab_search(&sidtab, xfrm_sid);
+	if (!xfrm_ctx) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, xfrm_sid);
+		goto out;
+	}
+	rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
+	if (rc)
+		goto out;
+
+	/* at present NetLabel SIDs/labels really only carry MLS
+	 * information so if the MLS portion of the NetLabel SID
+	 * matches the MLS portion of the labeled XFRM SID/label
+	 * then pass along the XFRM SID as it is the most
+	 * expressive */
+	*peer_sid = xfrm_sid;
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+static int get_classes_callback(void *k, void *d, void *args)
+{
+	struct class_datum *datum = d;
+	char *name = k, **classes = args;
+	int value = datum->value - 1;
+
+	classes[value] = kstrdup(name, GFP_ATOMIC);
+	if (!classes[value])
+		return -ENOMEM;
+
+	return 0;
+}
+
+int security_get_classes(char ***classes, int *nclasses)
+{
+	int rc;
+
+	read_lock(&policy_rwlock);
+
+	rc = -ENOMEM;
+	*nclasses = policydb.p_classes.nprim;
+	*classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
+	if (!*classes)
+		goto out;
+
+	rc = hashtab_map(policydb.p_classes.table, get_classes_callback,
+			*classes);
+	if (rc) {
+		int i;
+		for (i = 0; i < *nclasses; i++)
+			kfree((*classes)[i]);
+		kfree(*classes);
+	}
+
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+static int get_permissions_callback(void *k, void *d, void *args)
+{
+	struct perm_datum *datum = d;
+	char *name = k, **perms = args;
+	int value = datum->value - 1;
+
+	perms[value] = kstrdup(name, GFP_ATOMIC);
+	if (!perms[value])
+		return -ENOMEM;
+
+	return 0;
+}
+
+int security_get_permissions(char *class, char ***perms, int *nperms)
+{
+	int rc, i;
+	struct class_datum *match;
+
+	read_lock(&policy_rwlock);
+
+	rc = -EINVAL;
+	match = hashtab_search(policydb.p_classes.table, class);
+	if (!match) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized class %s\n",
+			__func__, class);
+		goto out;
+	}
+
+	rc = -ENOMEM;
+	*nperms = match->permissions.nprim;
+	*perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
+	if (!*perms)
+		goto out;
+
+	if (match->comdatum) {
+		rc = hashtab_map(match->comdatum->permissions.table,
+				get_permissions_callback, *perms);
+		if (rc)
+			goto err;
+	}
+
+	rc = hashtab_map(match->permissions.table, get_permissions_callback,
+			*perms);
+	if (rc)
+		goto err;
+
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+
+err:
+	read_unlock(&policy_rwlock);
+	for (i = 0; i < *nperms; i++)
+		kfree((*perms)[i]);
+	kfree(*perms);
+	return rc;
+}
+
+int security_get_reject_unknown(void)
+{
+	return policydb.reject_unknown;
+}
+
+int security_get_allow_unknown(void)
+{
+	return policydb.allow_unknown;
+}
+
+/**
+ * security_policycap_supported - Check for a specific policy capability
+ * @req_cap: capability
+ *
+ * Description:
+ * This function queries the currently loaded policy to see if it supports the
+ * capability specified by @req_cap.  Returns true (1) if the capability is
+ * supported, false (0) if it isn't supported.
+ *
+ */
+int security_policycap_supported(unsigned int req_cap)
+{
+	int rc;
+
+	read_lock(&policy_rwlock);
+	rc = ebitmap_get_bit(&policydb.policycaps, req_cap);
+	read_unlock(&policy_rwlock);
+
+	return rc;
+}
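+
+/*
+ * Hedged example: callers typically gate optional behavior on a policy
+ * capability, e.g. (illustrative):
+ *
+ *	if (security_policycap_supported(POLICYDB_CAPABILITY_OPENPERM))
+ *		... the loaded policy defines the file "open" permission ...
+ */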
+
+struct selinux_audit_rule {
+	u32 au_seqno;
+	struct context au_ctxt;
+};
+
+void selinux_audit_rule_free(void *vrule)
+{
+	struct selinux_audit_rule *rule = vrule;
+
+	if (rule) {
+		context_destroy(&rule->au_ctxt);
+		kfree(rule);
+	}
+}
+
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+{
+	struct selinux_audit_rule *tmprule;
+	struct role_datum *roledatum;
+	struct type_datum *typedatum;
+	struct user_datum *userdatum;
+	struct selinux_audit_rule **rule = (struct selinux_audit_rule **)vrule;
+	int rc = 0;
+
+	*rule = NULL;
+
+	if (!ss_initialized)
+		return -EOPNOTSUPP;
+
+	switch (field) {
+	case AUDIT_SUBJ_USER:
+	case AUDIT_SUBJ_ROLE:
+	case AUDIT_SUBJ_TYPE:
+	case AUDIT_OBJ_USER:
+	case AUDIT_OBJ_ROLE:
+	case AUDIT_OBJ_TYPE:
+		/* only 'equals' and 'not equals' fit user, role, and type */
+		if (op != Audit_equal && op != Audit_not_equal)
+			return -EINVAL;
+		break;
+	case AUDIT_SUBJ_SEN:
+	case AUDIT_SUBJ_CLR:
+	case AUDIT_OBJ_LEV_LOW:
+	case AUDIT_OBJ_LEV_HIGH:
+		/* we do not allow a range, indicated by the presence of '-' */
+		if (strchr(rulestr, '-'))
+			return -EINVAL;
+		break;
+	default:
+		/* only the above fields are valid */
+		return -EINVAL;
+	}
+
+	tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
+	if (!tmprule)
+		return -ENOMEM;
+
+	context_init(&tmprule->au_ctxt);
+
+	read_lock(&policy_rwlock);
+
+	tmprule->au_seqno = latest_granting;
+
+	switch (field) {
+	case AUDIT_SUBJ_USER:
+	case AUDIT_OBJ_USER:
+		rc = -EINVAL;
+		userdatum = hashtab_search(policydb.p_users.table, rulestr);
+		if (!userdatum)
+			goto out;
+		tmprule->au_ctxt.user = userdatum->value;
+		break;
+	case AUDIT_SUBJ_ROLE:
+	case AUDIT_OBJ_ROLE:
+		rc = -EINVAL;
+		roledatum = hashtab_search(policydb.p_roles.table, rulestr);
+		if (!roledatum)
+			goto out;
+		tmprule->au_ctxt.role = roledatum->value;
+		break;
+	case AUDIT_SUBJ_TYPE:
+	case AUDIT_OBJ_TYPE:
+		rc = -EINVAL;
+		typedatum = hashtab_search(policydb.p_types.table, rulestr);
+		if (!typedatum)
+			goto out;
+		tmprule->au_ctxt.type = typedatum->value;
+		break;
+	case AUDIT_SUBJ_SEN:
+	case AUDIT_SUBJ_CLR:
+	case AUDIT_OBJ_LEV_LOW:
+	case AUDIT_OBJ_LEV_HIGH:
+		rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
+		if (rc)
+			goto out;
+		break;
+	}
+	rc = 0;
+out:
+	read_unlock(&policy_rwlock);
+
+	if (rc) {
+		selinux_audit_rule_free(tmprule);
+		tmprule = NULL;
+	}
+
+	*rule = tmprule;
+
+	return rc;
+}
+
+/* Check to see if the rule contains any selinux fields */
+int selinux_audit_rule_known(struct audit_krule *rule)
+{
+	int i;
+
+	for (i = 0; i < rule->field_count; i++) {
+		struct audit_field *f = &rule->fields[i];
+		switch (f->type) {
+		case AUDIT_SUBJ_USER:
+		case AUDIT_SUBJ_ROLE:
+		case AUDIT_SUBJ_TYPE:
+		case AUDIT_SUBJ_SEN:
+		case AUDIT_SUBJ_CLR:
+		case AUDIT_OBJ_USER:
+		case AUDIT_OBJ_ROLE:
+		case AUDIT_OBJ_TYPE:
+		case AUDIT_OBJ_LEV_LOW:
+		case AUDIT_OBJ_LEV_HIGH:
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
+			     struct audit_context *actx)
+{
+	struct context *ctxt;
+	struct mls_level *level;
+	struct selinux_audit_rule *rule = vrule;
+	int match = 0;
+
+	if (unlikely(!rule)) {
+		WARN_ONCE(1, "selinux_audit_rule_match: missing rule\n");
+		return -ENOENT;
+	}
+
+	read_lock(&policy_rwlock);
+
+	if (rule->au_seqno < latest_granting) {
+		match = -ESTALE;
+		goto out;
+	}
+
+	ctxt = sidtab_search(&sidtab, sid);
+	if (unlikely(!ctxt)) {
+		WARN_ONCE(1, "selinux_audit_rule_match: unrecognized SID %d\n",
+			  sid);
+		match = -ENOENT;
+		goto out;
+	}
+
+	/* a field/op pair that is not caught here will simply fall through
+	   without a match */
+	switch (field) {
+	case AUDIT_SUBJ_USER:
+	case AUDIT_OBJ_USER:
+		switch (op) {
+		case Audit_equal:
+			match = (ctxt->user == rule->au_ctxt.user);
+			break;
+		case Audit_not_equal:
+			match = (ctxt->user != rule->au_ctxt.user);
+			break;
+		}
+		break;
+	case AUDIT_SUBJ_ROLE:
+	case AUDIT_OBJ_ROLE:
+		switch (op) {
+		case Audit_equal:
+			match = (ctxt->role == rule->au_ctxt.role);
+			break;
+		case Audit_not_equal:
+			match = (ctxt->role != rule->au_ctxt.role);
+			break;
+		}
+		break;
+	case AUDIT_SUBJ_TYPE:
+	case AUDIT_OBJ_TYPE:
+		switch (op) {
+		case Audit_equal:
+			match = (ctxt->type == rule->au_ctxt.type);
+			break;
+		case Audit_not_equal:
+			match = (ctxt->type != rule->au_ctxt.type);
+			break;
+		}
+		break;
+	case AUDIT_SUBJ_SEN:
+	case AUDIT_SUBJ_CLR:
+	case AUDIT_OBJ_LEV_LOW:
+	case AUDIT_OBJ_LEV_HIGH:
+		level = ((field == AUDIT_SUBJ_SEN ||
+			  field == AUDIT_OBJ_LEV_LOW) ?
+			 &ctxt->range.level[0] : &ctxt->range.level[1]);
+		switch (op) {
+		case Audit_equal:
+			match = mls_level_eq(&rule->au_ctxt.range.level[0],
+					     level);
+			break;
+		case Audit_not_equal:
+			match = !mls_level_eq(&rule->au_ctxt.range.level[0],
+					      level);
+			break;
+		case Audit_lt:
+			match = (mls_level_dom(&rule->au_ctxt.range.level[0],
+					       level) &&
+				 !mls_level_eq(&rule->au_ctxt.range.level[0],
+					       level));
+			break;
+		case Audit_le:
+			match = mls_level_dom(&rule->au_ctxt.range.level[0],
+					      level);
+			break;
+		case Audit_gt:
+			match = (mls_level_dom(level,
+					      &rule->au_ctxt.range.level[0]) &&
+				 !mls_level_eq(level,
+					       &rule->au_ctxt.range.level[0]));
+			break;
+		case Audit_ge:
+			match = mls_level_dom(level,
+					      &rule->au_ctxt.range.level[0]);
+			break;
+		}
+	}
+
+out:
+	read_unlock(&policy_rwlock);
+	return match;
+}
+
+static int (*aurule_callback)(void) = audit_update_lsm_rules;
+
+static int aurule_avc_callback(u32 event)
+{
+	int err = 0;
+
+	if (event == AVC_CALLBACK_RESET && aurule_callback)
+		err = aurule_callback();
+	return err;
+}
+
+static int __init aurule_init(void)
+{
+	int err;
+
+	err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET);
+	if (err)
+		panic("avc_add_callback() failed, error %d\n", err);
+
+	return err;
+}
+__initcall(aurule_init);
+
+#ifdef CONFIG_NETLABEL
+/**
+ * security_netlbl_cache_add - Add an entry to the NetLabel cache
+ * @secattr: the NetLabel packet security attributes
+ * @sid: the SELinux SID
+ *
+ * Description:
+ * Attempt to cache the SID in @sid, which was mapped from the packet's
+ * security attributes, in the NetLabel subsystem cache.  This function
+ * assumes @secattr has already been initialized.
+ *
+ */
+static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
+				      u32 sid)
+{
+	u32 *sid_cache;
+
+	sid_cache = kmalloc(sizeof(*sid_cache), GFP_ATOMIC);
+	if (sid_cache == NULL)
+		return;
+	secattr->cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
+	if (secattr->cache == NULL) {
+		kfree(sid_cache);
+		return;
+	}
+
+	*sid_cache = sid;
+	secattr->cache->free = kfree;
+	secattr->cache->data = sid_cache;
+	secattr->flags |= NETLBL_SECATTR_CACHE;
+}
+
+/**
+ * security_netlbl_secattr_to_sid - Convert a NetLabel secattr to a SELinux SID
+ * @secattr: the NetLabel packet security attributes
+ * @sid: the SELinux SID
+ *
+ * Description:
+ * Convert the given NetLabel security attributes in @secattr into a
+ * SELinux SID.  If the @secattr field does not contain a full SELinux
+ * SID/context then use SECINITSID_NETMSG as the foundation.  If possible the
+ * 'cache' field of @secattr is set and the CACHE flag is set; this is to
+ * allow the @secattr to be used by NetLabel to cache the secattr to SID
+ * conversion for future lookups.  Returns zero on success, negative values on
+ * failure.
+ *
+ */
+int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+				   u32 *sid)
+{
+	int rc;
+	struct context *ctx;
+	struct context ctx_new;
+
+	if (!ss_initialized) {
+		*sid = SECSID_NULL;
+		return 0;
+	}
+
+	read_lock(&policy_rwlock);
+
+	if (secattr->flags & NETLBL_SECATTR_CACHE)
+		*sid = *(u32 *)secattr->cache->data;
+	else if (secattr->flags & NETLBL_SECATTR_SECID)
+		*sid = secattr->attr.secid;
+	else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
+		rc = -EIDRM;
+		ctx = sidtab_search(&sidtab, SECINITSID_NETMSG);
+		if (ctx == NULL)
+			goto out;
+
+		context_init(&ctx_new);
+		ctx_new.user = ctx->user;
+		ctx_new.role = ctx->role;
+		ctx_new.type = ctx->type;
+		mls_import_netlbl_lvl(&ctx_new, secattr);
+		if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
+			rc = mls_import_netlbl_cat(&ctx_new, secattr);
+			if (rc)
+				goto out;
+		}
+		rc = -EIDRM;
+		if (!mls_context_isvalid(&policydb, &ctx_new))
+			goto out_free;
+
+		rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid);
+		if (rc)
+			goto out_free;
+
+		security_netlbl_cache_add(secattr, *sid);
+
+		ebitmap_destroy(&ctx_new.range.level[0].cat);
+	} else
+		*sid = SECSID_NULL;
+
+	read_unlock(&policy_rwlock);
+	return 0;
+out_free:
+	ebitmap_destroy(&ctx_new.range.level[0].cat);
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+/**
+ * security_netlbl_sid_to_secattr - Convert a SELinux SID to a NetLabel secattr
+ * @sid: the SELinux SID
+ * @secattr: the NetLabel packet security attributes
+ *
+ * Description:
+ * Convert the given SELinux SID in @sid into a NetLabel security attribute.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
+{
+	int rc;
+	struct context *ctx;
+
+	if (!ss_initialized)
+		return 0;
+
+	read_lock(&policy_rwlock);
+
+	rc = -ENOENT;
+	ctx = sidtab_search(&sidtab, sid);
+	if (ctx == NULL)
+		goto out;
+
+	rc = -ENOMEM;
+	secattr->domain = kstrdup(sym_name(&policydb, SYM_TYPES, ctx->type - 1),
+				  GFP_ATOMIC);
+	if (secattr->domain == NULL)
+		goto out;
+
+	secattr->attr.secid = sid;
+	secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID;
+	mls_export_netlbl_lvl(ctx, secattr);
+	rc = mls_export_netlbl_cat(ctx, secattr);
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+#endif /* CONFIG_NETLABEL */
+
+/**
+ * security_read_policy - read the policy.
+ * @data: binary policy data
+ * @len: length of data in bytes
+ *
+ */
+int security_read_policy(void **data, size_t *len)
+{
+	int rc;
+	struct policy_file fp;
+
+	if (!ss_initialized)
+		return -EINVAL;
+
+	*len = security_policydb_len();
+
+	*data = vmalloc_user(*len);
+	if (!*data)
+		return -ENOMEM;
+
+	fp.data = *data;
+	fp.len = *len;
+
+	read_lock(&policy_rwlock);
+	rc = policydb_write(&policydb, &fp);
+	read_unlock(&policy_rwlock);
+
+	if (rc)
+		return rc;
+
+	*len = (unsigned long)fp.data - (unsigned long)*data;
+	return 0;
+}
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
new file mode 100644
index 0000000..6abcd87
--- /dev/null
+++ b/security/selinux/ss/services.h
@@ -0,0 +1,21 @@
+/*
+ * Implementation of the security services.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SS_SERVICES_H_
+#define _SS_SERVICES_H_
+
+#include "policydb.h"
+#include "sidtab.h"
+
+extern struct policydb policydb;
+
+void services_compute_xperms_drivers(struct extended_perms *xperms,
+				struct avtab_node *node);
+
+void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+					struct avtab_node *node);
+
+#endif	/* _SS_SERVICES_H_ */
+
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
new file mode 100644
index 0000000..5840a35
--- /dev/null
+++ b/security/selinux/ss/sidtab.c
@@ -0,0 +1,313 @@
+/*
+ * Implementation of the SID table type.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include "flask.h"
+#include "security.h"
+#include "sidtab.h"
+
+#define SIDTAB_HASH(sid) \
+(sid & SIDTAB_HASH_MASK)
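+
+/*
+ * With SIDTAB_HASH_BITS == 7 the mask keeps only the low seven bits,
+ * so e.g. SIDTAB_HASH(130) == 2: SIDs are spread across the 128
+ * buckets purely by their low-order bits.
+ */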
+
+int sidtab_init(struct sidtab *s)
+{
+	int i;
+
+	s->htable = kmalloc(sizeof(*(s->htable)) * SIDTAB_SIZE, GFP_ATOMIC);
+	if (!s->htable)
+		return -ENOMEM;
+	for (i = 0; i < SIDTAB_SIZE; i++)
+		s->htable[i] = NULL;
+	s->nel = 0;
+	s->next_sid = 1;
+	s->shutdown = 0;
+	spin_lock_init(&s->lock);
+	return 0;
+}
+
+int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
+{
+	int hvalue, rc = 0;
+	struct sidtab_node *prev, *cur, *newnode;
+
+	if (!s) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	hvalue = SIDTAB_HASH(sid);
+	prev = NULL;
+	cur = s->htable[hvalue];
+	while (cur && sid > cur->sid) {
+		prev = cur;
+		cur = cur->next;
+	}
+
+	if (cur && sid == cur->sid) {
+		rc = -EEXIST;
+		goto out;
+	}
+
+	newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
+	if (newnode == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	newnode->sid = sid;
+	if (context_cpy(&newnode->context, context)) {
+		kfree(newnode);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (prev) {
+		newnode->next = prev->next;
+		wmb();
+		prev->next = newnode;
+	} else {
+		newnode->next = s->htable[hvalue];
+		wmb();
+		s->htable[hvalue] = newnode;
+	}
+
+	s->nel++;
+	if (sid >= s->next_sid)
+		s->next_sid = sid + 1;
+out:
+	return rc;
+}
+
+static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
+{
+	int hvalue;
+	struct sidtab_node *cur;
+
+	if (!s)
+		return NULL;
+
+	hvalue = SIDTAB_HASH(sid);
+	cur = s->htable[hvalue];
+	while (cur && sid > cur->sid)
+		cur = cur->next;
+
+	if (force && cur && sid == cur->sid && cur->context.len)
+		return &cur->context;
+
+	if (cur == NULL || sid != cur->sid || cur->context.len) {
+		/* Remap invalid SIDs to the unlabeled SID. */
+		sid = SECINITSID_UNLABELED;
+		hvalue = SIDTAB_HASH(sid);
+		cur = s->htable[hvalue];
+		while (cur && sid > cur->sid)
+			cur = cur->next;
+		if (!cur || sid != cur->sid)
+			return NULL;
+	}
+
+	return &cur->context;
+}
+
+struct context *sidtab_search(struct sidtab *s, u32 sid)
+{
+	return sidtab_search_core(s, sid, 0);
+}
+
+struct context *sidtab_search_force(struct sidtab *s, u32 sid)
+{
+	return sidtab_search_core(s, sid, 1);
+}
+
+int sidtab_map(struct sidtab *s,
+	       int (*apply) (u32 sid,
+			     struct context *context,
+			     void *args),
+	       void *args)
+{
+	int i, rc = 0;
+	struct sidtab_node *cur;
+
+	if (!s)
+		goto out;
+
+	for (i = 0; i < SIDTAB_SIZE; i++) {
+		cur = s->htable[i];
+		while (cur) {
+			rc = apply(cur->sid, &cur->context, args);
+			if (rc)
+				goto out;
+			cur = cur->next;
+		}
+	}
+out:
+	return rc;
+}
+
+static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
+{
+	BUG_ON(loc >= SIDTAB_CACHE_LEN);
+
+	while (loc > 0) {
+		s->cache[loc] = s->cache[loc - 1];
+		loc--;
+	}
+	s->cache[0] = n;
+}
+
+static inline u32 sidtab_search_context(struct sidtab *s,
+						  struct context *context)
+{
+	int i;
+	struct sidtab_node *cur;
+
+	for (i = 0; i < SIDTAB_SIZE; i++) {
+		cur = s->htable[i];
+		while (cur) {
+			if (context_cmp(&cur->context, context)) {
+				sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
+				return cur->sid;
+			}
+			cur = cur->next;
+		}
+	}
+	return 0;
+}
+
+static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
+{
+	int i;
+	struct sidtab_node *node;
+
+	for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
+		node = s->cache[i];
+		if (unlikely(!node))
+			return 0;
+		if (context_cmp(&node->context, context)) {
+			sidtab_update_cache(s, node, i);
+			return node->sid;
+		}
+	}
+	return 0;
+}
+
+int sidtab_context_to_sid(struct sidtab *s,
+			  struct context *context,
+			  u32 *out_sid)
+{
+	u32 sid;
+	int ret = 0;
+	unsigned long flags;
+
+	*out_sid = SECSID_NULL;
+
+	sid  = sidtab_search_cache(s, context);
+	if (!sid)
+		sid = sidtab_search_context(s, context);
+	if (!sid) {
+		spin_lock_irqsave(&s->lock, flags);
+		/* Rescan now that we hold the lock. */
+		sid = sidtab_search_context(s, context);
+		if (sid)
+			goto unlock_out;
+		/* No SID exists for the context.  Allocate a new one. */
+		if (s->next_sid == UINT_MAX || s->shutdown) {
+			ret = -ENOMEM;
+			goto unlock_out;
+		}
+		sid = s->next_sid++;
+		if (context->len)
+			printk(KERN_INFO
+		       "SELinux:  Context %s is not valid (left unmapped).\n",
+			       context->str);
+		ret = sidtab_insert(s, sid, context);
+		if (ret)
+			s->next_sid--;
+unlock_out:
+		spin_unlock_irqrestore(&s->lock, flags);
+	}
+
+	if (ret)
+		return ret;
+
+	*out_sid = sid;
+	return 0;
+}
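+
+/*
+ * Design note: both lookups above (the small MRU cache and the full
+ * table scan) run without the lock; the spinlock is taken only when a
+ * new SID must be allocated, and the table is rescanned under the lock
+ * to close the race with a concurrent inserter.
+ */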
+
+void sidtab_hash_eval(struct sidtab *h, char *tag)
+{
+	int i, chain_len, slots_used, max_chain_len;
+	struct sidtab_node *cur;
+
+	slots_used = 0;
+	max_chain_len = 0;
+	for (i = 0; i < SIDTAB_SIZE; i++) {
+		cur = h->htable[i];
+		if (cur) {
+			slots_used++;
+			chain_len = 0;
+			while (cur) {
+				chain_len++;
+				cur = cur->next;
+			}
+
+			if (chain_len > max_chain_len)
+				max_chain_len = chain_len;
+		}
+	}
+
+	printk(KERN_DEBUG "%s:  %d entries and %d/%d buckets used, longest "
+	       "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
+	       max_chain_len);
+}
+
+void sidtab_destroy(struct sidtab *s)
+{
+	int i;
+	struct sidtab_node *cur, *temp;
+
+	if (!s)
+		return;
+
+	for (i = 0; i < SIDTAB_SIZE; i++) {
+		cur = s->htable[i];
+		while (cur) {
+			temp = cur;
+			cur = cur->next;
+			context_destroy(&temp->context);
+			kfree(temp);
+		}
+		s->htable[i] = NULL;
+	}
+	kfree(s->htable);
+	s->htable = NULL;
+	s->nel = 0;
+	s->next_sid = 1;
+}
+
+void sidtab_set(struct sidtab *dst, struct sidtab *src)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&src->lock, flags);
+	dst->htable = src->htable;
+	dst->nel = src->nel;
+	dst->next_sid = src->next_sid;
+	dst->shutdown = 0;
+	for (i = 0; i < SIDTAB_CACHE_LEN; i++)
+		dst->cache[i] = NULL;
+	spin_unlock_irqrestore(&src->lock, flags);
+}
+
+void sidtab_shutdown(struct sidtab *s)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&s->lock, flags);
+	s->shutdown = 1;
+	spin_unlock_irqrestore(&s->lock, flags);
+}
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
new file mode 100644
index 0000000..84dc154
--- /dev/null
+++ b/security/selinux/ss/sidtab.h
@@ -0,0 +1,56 @@
+/*
+ * A security identifier table (sidtab) is a hash table
+ * of security context structures indexed by SID value.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SS_SIDTAB_H_
+#define _SS_SIDTAB_H_
+
+#include "context.h"
+
+struct sidtab_node {
+	u32 sid;		/* security identifier */
+	struct context context;	/* security context structure */
+	struct sidtab_node *next;
+};
+
+#define SIDTAB_HASH_BITS 7
+#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)
+#define SIDTAB_HASH_MASK (SIDTAB_HASH_BUCKETS-1)
+
+#define SIDTAB_SIZE SIDTAB_HASH_BUCKETS
+
+struct sidtab {
+	struct sidtab_node **htable;
+	unsigned int nel;	/* number of elements */
+	unsigned int next_sid;	/* next SID to allocate */
+	unsigned char shutdown;
+#define SIDTAB_CACHE_LEN	3
+	struct sidtab_node *cache[SIDTAB_CACHE_LEN];
+	spinlock_t lock;
+};
+
+int sidtab_init(struct sidtab *s);
+int sidtab_insert(struct sidtab *s, u32 sid, struct context *context);
+struct context *sidtab_search(struct sidtab *s, u32 sid);
+struct context *sidtab_search_force(struct sidtab *s, u32 sid);
+
+int sidtab_map(struct sidtab *s,
+	       int (*apply) (u32 sid,
+			     struct context *context,
+			     void *args),
+	       void *args);
+
+int sidtab_context_to_sid(struct sidtab *s,
+			  struct context *context,
+			  u32 *sid);
+
+void sidtab_hash_eval(struct sidtab *h, char *tag);
+void sidtab_destroy(struct sidtab *s);
+void sidtab_set(struct sidtab *dst, struct sidtab *src);
+void sidtab_shutdown(struct sidtab *s);
+
+#endif	/* _SS_SIDTAB_H_ */
+
+
diff --git a/security/selinux/ss/status.c b/security/selinux/ss/status.c
new file mode 100644
index 0000000..d982365
--- /dev/null
+++ b/security/selinux/ss/status.c
@@ -0,0 +1,126 @@
+/*
+ * mmap based event notifications for SELinux
+ *
+ * Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Copyright (C) 2010 NEC corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include "avc.h"
+#include "services.h"
+
+/*
+ * The selinux_status_page is exposed to userspace applications through
+ * the mmap interface on /selinux/status.
+ * It allows the kernel to notify applications of events that require a
+ * reset of the userspace access vector cache, without a context switch.
+ *
+ * The selinux_kernel_status structure at the head of the status page is
+ * protected from concurrent access using seqlock logic, so userspace
+ * applications should read the status page according to that logic.
+ *
+ * Typically, an application checks status->sequence at the start of its
+ * access control routine.  If the value is odd, the kernel is updating
+ * the status, so the application should retry shortly.  If the value
+ * has changed since the last read, something happened, and the
+ * application should reset its userspace AVC if needed.
+ * In most cases this lets an application confirm that the kernel status
+ * is unchanged without making any system calls.
+ */
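+
+/*
+ * A minimal userspace reader sketch (illustrative only; assumes the
+ * page was mmap()'ed read-only from /selinux/status, and a real reader
+ * would also need read memory barriers around the loads):
+ *
+ *	struct selinux_kernel_status *st = map;
+ *	u32 seq, enforcing;
+ *	do {
+ *		seq = st->sequence;
+ *		if (seq & 1)
+ *			continue;	(update in progress, retry)
+ *		enforcing = st->enforcing;
+ *	} while (st->sequence != seq);
+ */
+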
+static struct page *selinux_status_page;
+static DEFINE_MUTEX(selinux_status_lock);
+
+/*
+ * selinux_kernel_status_page
+ *
+ * Returns a reference to selinux_status_page, allocating it on first
+ * use if it has not been allocated yet.
+ */
+struct page *selinux_kernel_status_page(void)
+{
+	struct selinux_kernel_status   *status;
+	struct page		       *result = NULL;
+
+	mutex_lock(&selinux_status_lock);
+	if (!selinux_status_page) {
+		selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+
+		if (selinux_status_page) {
+			status = page_address(selinux_status_page);
+
+			status->version = SELINUX_KERNEL_STATUS_VERSION;
+			status->sequence = 0;
+			status->enforcing = selinux_enforcing;
+			/*
+			 * NOTE: the next policy load event will set
+			 * status->policyload to a positive value (not
+			 * necessarily 1, but never zero), so applications
+			 * can detect that it was updated.
+			 */
+			status->policyload = 0;
+			status->deny_unknown = !security_get_allow_unknown();
+		}
+	}
+	result = selinux_status_page;
+	mutex_unlock(&selinux_status_lock);
+
+	return result;
+}
+
+/*
+ * selinux_status_update_setenforce
+ *
+ * Updates the status page with the current enforcing/permissive mode.
+ */
+void selinux_status_update_setenforce(int enforcing)
+{
+	struct selinux_kernel_status   *status;
+
+	mutex_lock(&selinux_status_lock);
+	if (selinux_status_page) {
+		status = page_address(selinux_status_page);
+
+		status->sequence++;
+		smp_wmb();
+
+		status->enforcing = enforcing;
+
+		smp_wmb();
+		status->sequence++;
+	}
+	mutex_unlock(&selinux_status_lock);
+}
+
+/*
+ * selinux_status_update_policyload
+ *
+ * Updates the status page with the policy load sequence number and the
+ * current deny_unknown setting.
+ */
+void selinux_status_update_policyload(int seqno)
+{
+	struct selinux_kernel_status   *status;
+
+	mutex_lock(&selinux_status_lock);
+	if (selinux_status_page) {
+		status = page_address(selinux_status_page);
+
+		status->sequence++;
+		smp_wmb();
+
+		status->policyload = seqno;
+		status->deny_unknown = !security_get_allow_unknown();
+
+		smp_wmb();
+		status->sequence++;
+	}
+	mutex_unlock(&selinux_status_lock);
+}
diff --git a/security/selinux/ss/symtab.c b/security/selinux/ss/symtab.c
new file mode 100644
index 0000000..160326e
--- /dev/null
+++ b/security/selinux/ss/symtab.c
@@ -0,0 +1,43 @@
+/*
+ * Implementation of the symbol table type.
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include "symtab.h"
+
+static unsigned int symhash(struct hashtab *h, const void *key)
+{
+	const char *p, *keyp;
+	unsigned int size;
+	unsigned int val;
+
+	val = 0;
+	keyp = key;
+	size = strlen(keyp);
+	for (p = keyp; (p - keyp) < size; p++)
+		val = (val << 4 | (val >> (8*sizeof(unsigned int)-4))) ^ (*p);
+	return val & (h->size - 1);
+}
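+
+/*
+ * The hash rotates the accumulator left by four bits and XORs in each
+ * byte; e.g. for the key "ab" with a 32-bit unsigned int:
+ *
+ *	val = rol(0x0, 4) ^ 'a' = 0x61
+ *	val = rol(0x61, 4) ^ 'b' = 0x610 ^ 0x62 = 0x672
+ *
+ * before the result is masked down to the table size.
+ */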
+
+static int symcmp(struct hashtab *h, const void *key1, const void *key2)
+{
+	const char *keyp1, *keyp2;
+
+	keyp1 = key1;
+	keyp2 = key2;
+	return strcmp(keyp1, keyp2);
+}
+
+int symtab_init(struct symtab *s, unsigned int size)
+{
+	s->table = hashtab_create(symhash, symcmp, size);
+	if (!s->table)
+		return -ENOMEM;
+	s->nprim = 0;
+	return 0;
+}
+
diff --git a/security/selinux/ss/symtab.h b/security/selinux/ss/symtab.h
new file mode 100644
index 0000000..ca422b4
--- /dev/null
+++ b/security/selinux/ss/symtab.h
@@ -0,0 +1,23 @@
+/*
+ * A symbol table (symtab) maintains associations between symbol
+ * strings and datum values.  The type of the datum values
+ * is arbitrary.  The symbol table type is implemented
+ * using the hash table type (hashtab).
+ *
+ * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
+ */
+#ifndef _SS_SYMTAB_H_
+#define _SS_SYMTAB_H_
+
+#include "hashtab.h"
+
+struct symtab {
+	struct hashtab *table;	/* hash table (keyed on a string) */
+	u32 nprim;		/* number of primary names in table */
+};
+
+int symtab_init(struct symtab *s, unsigned int size);
+
+#endif	/* _SS_SYMTAB_H_ */
+
+
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
new file mode 100644
index 0000000..56e354f
--- /dev/null
+++ b/security/selinux/xfrm.c
@@ -0,0 +1,468 @@
+/*
+ *  NSA Security-Enhanced Linux (SELinux) security module
+ *
+ *  This file contains the SELinux XFRM hook function implementations.
+ *
+ *  Authors:  Serge Hallyn <sergeh@us.ibm.com>
+ *	      Trent Jaeger <jaegert@us.ibm.com>
+ *
+ *  Updated: Venkat Yekkirala <vyekkirala@TrustedCS.com>
+ *
+ *           Granular IPSec Associations for use in MLS environments.
+ *
+ *  Copyright (C) 2005 International Business Machines Corporation
+ *  Copyright (C) 2006 Trusted Computer Solutions, Inc.
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License version 2,
+ *	as published by the Free Software Foundation.
+ */
+
+/*
+ * USAGE:
+ * NOTES:
+ *   1. Make sure to enable the following options in your kernel config:
+ *	CONFIG_SECURITY=y
+ *	CONFIG_SECURITY_NETWORK=y
+ *	CONFIG_SECURITY_NETWORK_XFRM=y
+ *	CONFIG_SECURITY_SELINUX=m/y
+ * ISSUES:
+ *   1. Caching packets, so they are not dropped during negotiation
+ *   2. Emulating a reasonable SO_PEERSEC across machines
+ *   3. Testing addition of sk_policy's with security context via setsockopt
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+#include <linux/atomic.h>
+
+#include "avc.h"
+#include "objsec.h"
+#include "xfrm.h"
+
+/* Labeled XFRM instance counter */
+atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0);
+
+/*
+ * Returns true if the context is an LSM/SELinux context.
+ */
+static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
+{
+	return (ctx &&
+		(ctx->ctx_doi == XFRM_SC_DOI_LSM) &&
+		(ctx->ctx_alg == XFRM_SC_ALG_SELINUX));
+}
+
+/*
+ * Returns true if the xfrm contains a security blob for SELinux.
+ */
+static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
+{
+	return selinux_authorizable_ctx(x->security);
+}
+
+/*
+ * Allocates a xfrm_sec_state and populates it using the supplied security
+ * xfrm_user_sec_ctx context.
+ */
+static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
+				   struct xfrm_user_sec_ctx *uctx,
+				   gfp_t gfp)
+{
+	int rc;
+	const struct task_security_struct *tsec = current_security();
+	struct xfrm_sec_ctx *ctx = NULL;
+	u32 str_len;
+
+	if (ctxp == NULL || uctx == NULL ||
+	    uctx->ctx_doi != XFRM_SC_DOI_LSM ||
+	    uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
+		return -EINVAL;
+
+	str_len = uctx->ctx_len;
+	if (str_len >= PAGE_SIZE)
+		return -ENOMEM;
+
+	ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->ctx_doi = XFRM_SC_DOI_LSM;
+	ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+	ctx->ctx_len = str_len;
+	memcpy(ctx->ctx_str, &uctx[1], str_len);
+	ctx->ctx_str[str_len] = '\0';
+	rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp);
+	if (rc)
+		goto err;
+
+	rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+			  SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
+	if (rc)
+		goto err;
+
+	*ctxp = ctx;
+	atomic_inc(&selinux_xfrm_refcount);
+	return 0;
+
+err:
+	kfree(ctx);
+	return rc;
+}
+
+/*
+ * Free the xfrm_sec_ctx structure.
+ */
+static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
+{
+	if (!ctx)
+		return;
+
+	atomic_dec(&selinux_xfrm_refcount);
+	kfree(ctx);
+}
+
+/*
+ * Authorize the deletion of a labeled SA or policy rule.
+ */
+static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
+{
+	const struct task_security_struct *tsec = current_security();
+
+	if (!ctx)
+		return 0;
+
+	return avc_has_perm(tsec->sid, ctx->ctx_sid,
+			    SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+			    NULL);
+}
+
+/*
+ * LSM hook implementation that authorizes that a flow can use a xfrm policy
+ * rule.
+ */
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+{
+	int rc;
+
+	/* All flows should be treated as polmatch'ing an otherwise applicable
+	 * "non-labeled" policy. This would prevent inadvertent "leaks". */
+	if (!ctx)
+		return 0;
+
+	/* Context sid is either set to label or ANY_ASSOC */
+	if (!selinux_authorizable_ctx(ctx))
+		return -EINVAL;
+
+	rc = avc_has_perm(fl_secid, ctx->ctx_sid,
+			  SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
+	return (rc == -EACCES ? -ESRCH : rc);
+}
+
+/*
+ * LSM hook implementation that authorizes that a state matches
+ * the given policy, flow combo.
+ */
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+				      struct xfrm_policy *xp,
+				      const struct flowi *fl)
+{
+	u32 state_sid;
+
+	if (!xp->security) {
+		if (x->security)
+			/* unlabeled policy and labeled SA can't match */
+			return 0;
+		/* unlabeled policy and unlabeled SA match all flows */
+		return 1;
+	}
+	if (!x->security)
+		/* unlabeled SA and labeled policy can't match */
+		return 0;
+	if (!selinux_authorizable_xfrm(x))
+		/* Not a SELinux-labeled SA */
+		return 0;
+
+	state_sid = x->security->ctx_sid;
+
+	if (fl->flowi_secid != state_sid)
+		return 0;
+
+	/* We don't need a separate SA Vs. policy polmatch check since the SA
+	 * is now of the same label as the flow and a flow Vs. policy polmatch
+	 * check had already happened in selinux_xfrm_policy_lookup() above. */
+	return (avc_has_perm(fl->flowi_secid, state_sid,
+			    SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
+			    NULL) ? 0 : 1);
+}
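+
+/*
+ * Summary of the matching rules above (illustrative):
+ *
+ *	policy		SA		result
+ *	unlabeled	unlabeled	match
+ *	unlabeled	labeled		no match
+ *	labeled		unlabeled	no match
+ *	labeled		labeled		match iff the flow SID equals the
+ *					SA SID and the flow holds the
+ *					association:sendto permission to it
+ */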
+
+static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	struct xfrm_state *x;
+
+	if (dst == NULL)
+		return SECSID_NULL;
+	x = dst->xfrm;
+	if (x == NULL || !selinux_authorizable_xfrm(x))
+		return SECSID_NULL;
+
+	return x->security->ctx_sid;
+}
+
+static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
+					u32 *sid, int ckall)
+{
+	u32 sid_session = SECSID_NULL;
+	struct sec_path *sp = skb->sp;
+
+	if (sp) {
+		int i;
+
+		for (i = sp->len - 1; i >= 0; i--) {
+			struct xfrm_state *x = sp->xvec[i];
+			if (selinux_authorizable_xfrm(x)) {
+				struct xfrm_sec_ctx *ctx = x->security;
+
+				if (sid_session == SECSID_NULL) {
+					sid_session = ctx->ctx_sid;
+					if (!ckall)
+						goto out;
+				} else if (sid_session != ctx->ctx_sid) {
+					*sid = SECSID_NULL;
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+out:
+	*sid = sid_session;
+	return 0;
+}
+
+/*
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
+ */
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+{
+	if (skb == NULL) {
+		*sid = SECSID_NULL;
+		return 0;
+	}
+	return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+}
+
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+	int rc;
+
+	rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+	if (rc == 0 && *sid == SECSID_NULL)
+		*sid = selinux_xfrm_skb_sid_egress(skb);
+
+	return rc;
+}
+
+/*
+ * LSM hook implementation that allocates and transfers the uctx spec to the
+ * xfrm_policy.
+ */
+int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+			      struct xfrm_user_sec_ctx *uctx,
+			      gfp_t gfp)
+{
+	return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
+}
+
+/*
+ * LSM hook implementation that copies security data structure from old to new
+ * for policy cloning.
+ */
+int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
+			      struct xfrm_sec_ctx **new_ctxp)
+{
+	struct xfrm_sec_ctx *new_ctx;
+
+	if (!old_ctx)
+		return 0;
+
+	new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len,
+			  GFP_ATOMIC);
+	if (!new_ctx)
+		return -ENOMEM;
+	atomic_inc(&selinux_xfrm_refcount);
+	*new_ctxp = new_ctx;
+
+	return 0;
+}
+
+/*
+ * LSM hook implementation that frees xfrm_sec_ctx security information.
+ */
+void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
+{
+	selinux_xfrm_free(ctx);
+}
+
+/*
+ * LSM hook implementation that authorizes deletion of labeled policies.
+ */
+int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
+{
+	return selinux_xfrm_delete(ctx);
+}
+
+/*
+ * LSM hook implementation that allocates an xfrm_sec_ctx, populates it
+ * using the supplied security context, and assigns it to the xfrm_state.
+ */
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+			     struct xfrm_user_sec_ctx *uctx)
+{
+	return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
+}
+
+/*
+ * LSM hook implementation that allocates an xfrm_sec_ctx and populates
+ * it based on a secid.
+ */
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+				     struct xfrm_sec_ctx *polsec, u32 secid)
+{
+	int rc;
+	struct xfrm_sec_ctx *ctx;
+	char *ctx_str = NULL;
+	int str_len;
+
+	if (!polsec)
+		return 0;
+
+	if (secid == 0)
+		return -EINVAL;
+
+	rc = security_sid_to_context(secid, &ctx_str, &str_len);
+	if (rc)
+		return rc;
+
+	ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
+	if (!ctx) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	ctx->ctx_doi = XFRM_SC_DOI_LSM;
+	ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+	ctx->ctx_sid = secid;
+	ctx->ctx_len = str_len;
+	memcpy(ctx->ctx_str, ctx_str, str_len);
+
+	x->security = ctx;
+	atomic_inc(&selinux_xfrm_refcount);
+out:
+	kfree(ctx_str);
+	return rc;
+}
+
+/*
+ * LSM hook implementation that frees xfrm_state security information.
+ */
+void selinux_xfrm_state_free(struct xfrm_state *x)
+{
+	selinux_xfrm_free(x->security);
+}
+
+/*
+ * LSM hook implementation that authorizes deletion of labeled SAs.
+ */
+int selinux_xfrm_state_delete(struct xfrm_state *x)
+{
+	return selinux_xfrm_delete(x->security);
+}
+
+/*
+ * LSM hook that controls access to unlabelled packets.  If an
+ * xfrm_state is authorizable (defined by macro) then it was
+ * already authorized by the IPSec process.  If not, then
+ * we need to check for unlabelled access since this may not have
+ * gone through the IPSec process.
+ */
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+			      struct common_audit_data *ad)
+{
+	int i;
+	struct sec_path *sp = skb->sp;
+	u32 peer_sid = SECINITSID_UNLABELED;
+
+	if (sp) {
+		for (i = 0; i < sp->len; i++) {
+			struct xfrm_state *x = sp->xvec[i];
+
+			if (x && selinux_authorizable_xfrm(x)) {
+				struct xfrm_sec_ctx *ctx = x->security;
+				peer_sid = ctx->ctx_sid;
+				break;
+			}
+		}
+	}
+
+	/* This check, even when there's no association involved, is intended,
+	 * according to Trent Jaeger, to make sure a process can't engage in
+	 * non-IPsec communication unless explicitly allowed by policy. */
+	return avc_has_perm(sk_sid, peer_sid,
+			    SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
+}
+
+/*
+ * POSTROUTE_LAST hook's XFRM processing:
+ * If we have no security association, then we need to determine
+ * whether the socket is allowed to send to an unlabelled destination.
+ * If we do have an authorizable security association, then it has already been
+ * checked in the selinux_xfrm_state_pol_flow_match hook above.
+ */
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+				struct common_audit_data *ad, u8 proto)
+{
+	struct dst_entry *dst;
+
+	switch (proto) {
+	case IPPROTO_AH:
+	case IPPROTO_ESP:
+	case IPPROTO_COMP:
+		/* We should have already seen this packet once before it
+		 * underwent xfrm(s). No need to subject it to the unlabeled
+		 * check. */
+		return 0;
+	default:
+		break;
+	}
+
+	dst = skb_dst(skb);
+	if (dst) {
+		struct dst_entry *iter;
+
+		for (iter = dst; iter != NULL; iter = iter->child) {
+			struct xfrm_state *x = iter->xfrm;
+
+			if (x && selinux_authorizable_xfrm(x))
+				return 0;
+		}
+	}
+
+	/* This check, even when there's no association involved, is intended,
+	 * according to Trent Jaeger, to make sure a process can't engage in
+	 * non-IPsec communication unless explicitly allowed by policy. */
+	return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
+			    SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
+}
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
new file mode 100644
index 0000000..271adae
--- /dev/null
+++ b/security/smack/Kconfig
@@ -0,0 +1,42 @@
+config SECURITY_SMACK
+	bool "Simplified Mandatory Access Control Kernel Support"
+	depends on NET
+	depends on INET
+	depends on SECURITY
+	select NETLABEL
+	select SECURITY_NETWORK
+	default n
+	help
+	  This selects the Simplified Mandatory Access Control Kernel.
+	  Smack is useful for sensitivity, integrity, and a variety
+	  of other mandatory security schemes.
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_SMACK_BRINGUP
+	bool "Reporting on access granted by Smack rules"
+	depends on SECURITY_SMACK
+	default n
+	help
+	  Enable the bring-up ("b") access mode in Smack rules.
+	  When access is granted by a rule with the "b" mode a
+	  message about the access requested is generated. The
+	  intention is that a process can be granted a wide set
+	  of access initially with the bringup mode set on the
+	  rules. The developer can use the information to
+	  identify which rules are necessary and what accesses
+	  may be inappropriate. The developer can reduce the
+	  access rule set once the behavior is well understood.
+	  This is a superior mechanism to the oft abused
+	  "permissive" mode of other systems.
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_SMACK_NETFILTER
+	bool "Packet marking using secmarks for netfilter"
+	depends on SECURITY_SMACK
+	depends on NETWORK_SECMARK
+	depends on NETFILTER
+	default n
+	help
+	  This enables security marking of network packets using
+	  Smack labels.
+	  If you are unsure how to answer this question, answer N.
diff --git a/security/smack/Makefile b/security/smack/Makefile
new file mode 100644
index 0000000..ee2ebd5
--- /dev/null
+++ b/security/smack/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the SMACK LSM
+#
+
+obj-$(CONFIG_SECURITY_SMACK) := smack.o
+
+smack-y := smack_lsm.o smack_access.o smackfs.o
+smack-$(CONFIG_SECURITY_SMACK_NETFILTER) += smack_netfilter.o
diff --git a/security/smack/smack.h b/security/smack/smack.h
new file mode 100644
index 0000000..6c91156
--- /dev/null
+++ b/security/smack/smack.h
@@ -0,0 +1,485 @@
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ *      This program is free software; you can redistribute it and/or modify
+ *      it under the terms of the GNU General Public License as published by
+ *      the Free Software Foundation, version 2.
+ *
+ * Author:
+ *      Casey Schaufler <casey@schaufler-ca.com>
+ *
+ */
+
+#ifndef _SECURITY_SMACK_H
+#define _SECURITY_SMACK_H
+
+#include <linux/capability.h>
+#include <linux/spinlock.h>
+#include <linux/lsm_hooks.h>
+#include <linux/in.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/in6.h>
+#endif /* CONFIG_IPV6 */
+#include <net/netlabel.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/lsm_audit.h>
+
+/*
+ * Use IPv6 port labeling if IPv6 is enabled and secmarks
+ * are not being used.
+ */
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#define SMACK_IPV6_PORT_LABELING 1
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6) && defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#define SMACK_IPV6_SECMARK_LABELING 1
+#endif
+
+/*
+ * Smack labels were limited to 23 characters for a long time.
+ */
+#define SMK_LABELLEN	24
+#define SMK_LONGLABEL	256
+
+/*
+ * This is the repository for labels seen so that it is
+ * not necessary to keep allocating tiny chunks of memory
+ * and so that they can be shared.
+ *
+ * Labels are never modified in place. Anytime a label
+ * is imported (e.g. xattrset on a file) the list is checked
+ * for it and it is added if it doesn't exist. The address
+ * is passed out in either case. Entries are added, but
+ * never deleted.
+ *
+ * Since labels are hanging around anyway it doesn't
+ * hurt to maintain a secid for those awkward situations
+ * where kernel components that ought to use LSM independent
+ * interfaces don't. The secid should go away when all of
+ * these components have been repaired.
+ *
+ * The cipso value associated with the label gets stored here, too.
+ *
+ * Keep the access rules for this subject label here so that
+ * the entire set of rules does not need to be examined every
+ * time.
+ */
+struct smack_known {
+	struct list_head		list;
+	struct hlist_node		smk_hashed;
+	char				*smk_known;
+	u32				smk_secid;
+	struct netlbl_lsm_secattr	smk_netlabel;	/* on wire labels */
+	struct list_head		smk_rules;	/* access rules */
+	struct mutex			smk_rules_lock;	/* lock for rules */
+};
+
+/*
+ * Maximum number of bytes for the levels in a CIPSO IP option.
+ * Why 23? CIPSO is constrained to 30, so a 32 byte buffer is
+ * bigger than can be used, and 24 is the next lower multiple
+ * of 8, and there are too many issues if there isn't space set
+ * aside for the terminating null byte.
+ */
+#define SMK_CIPSOLEN	24
+
+struct superblock_smack {
+	struct smack_known	*smk_root;
+	struct smack_known	*smk_floor;
+	struct smack_known	*smk_hat;
+	struct smack_known	*smk_default;
+	int			smk_initialized;
+};
+
+struct socket_smack {
+	struct smack_known	*smk_out;	/* outbound label */
+	struct smack_known	*smk_in;	/* inbound label */
+	struct smack_known	*smk_packet;	/* TCP peer label */
+};
+
+/*
+ * Inode smack data
+ */
+struct inode_smack {
+	struct smack_known	*smk_inode;	/* label of the fso */
+	struct smack_known	*smk_task;	/* label of the task */
+	struct smack_known	*smk_mmap;	/* label of the mmap domain */
+	struct mutex		smk_lock;	/* initialization lock */
+	int			smk_flags;	/* smack inode flags */
+};
+
+struct task_smack {
+	struct smack_known	*smk_task;	/* label for access control */
+	struct smack_known	*smk_forked;	/* label when forked */
+	struct list_head	smk_rules;	/* per task access rules */
+	struct mutex		smk_rules_lock;	/* lock for the rules */
+	struct list_head	smk_relabel;	/* transit allowed labels */
+};
+
+#define	SMK_INODE_INSTANT	0x01	/* inode is instantiated */
+#define	SMK_INODE_TRANSMUTE	0x02	/* directory is transmuting */
+#define	SMK_INODE_CHANGED	0x04	/* smack was transmuted */
+#define	SMK_INODE_IMPURE	0x08	/* involved in an impure transaction */
+
+/*
+ * A label access rule.
+ */
+struct smack_rule {
+	struct list_head	list;
+	struct smack_known	*smk_subject;
+	struct smack_known	*smk_object;
+	int			smk_access;
+};
+
+/*
+ * An entry in the table identifying IPv4 hosts.
+ */
+struct smk_net4addr {
+	struct list_head	list;
+	struct in_addr		smk_host;	/* network address */
+	struct in_addr		smk_mask;	/* network mask */
+	int			smk_masks;	/* mask size */
+	struct smack_known	*smk_label;	/* label */
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+/*
+ * An entry in the table identifying IPv6 hosts.
+ */
+struct smk_net6addr {
+	struct list_head	list;
+	struct in6_addr		smk_host;	/* network address */
+	struct in6_addr		smk_mask;	/* network mask */
+	int			smk_masks;	/* mask size */
+	struct smack_known	*smk_label;	/* label */
+};
+#endif /* CONFIG_IPV6 */
+
+#ifdef SMACK_IPV6_PORT_LABELING
+/*
+ * An entry in the table identifying ports.
+ */
+struct smk_port_label {
+	struct list_head	list;
+	struct sock		*smk_sock;	/* socket initialized on */
+	unsigned short		smk_port;	/* the port number */
+	struct smack_known	*smk_in;	/* inbound label */
+	struct smack_known	*smk_out;	/* outgoing label */
+};
+#endif /* SMACK_IPV6_PORT_LABELING */
+
+struct smack_known_list_elem {
+	struct list_head	list;
+	struct smack_known	*smk_label;
+};
+
+/* Super block security struct flags for mount options */
+#define FSDEFAULT_MNT	0x01
+#define FSFLOOR_MNT	0x02
+#define FSHAT_MNT	0x04
+#define FSROOT_MNT	0x08
+#define FSTRANS_MNT	0x10
+
+#define NUM_SMK_MNT_OPTS	5
+
+enum {
+	Opt_error = -1,
+	Opt_fsdefault = 1,
+	Opt_fsfloor = 2,
+	Opt_fshat = 3,
+	Opt_fsroot = 4,
+	Opt_fstransmute = 5,
+};
+
+/*
+ * Mount options
+ */
+#define SMK_FSDEFAULT	"smackfsdef="
+#define SMK_FSFLOOR	"smackfsfloor="
+#define SMK_FSHAT	"smackfshat="
+#define SMK_FSROOT	"smackfsroot="
+#define SMK_FSTRANS	"smackfstransmute="
+
+#define SMACK_DELETE_OPTION	"-DELETE"
+#define SMACK_CIPSO_OPTION 	"-CIPSO"
+
+/*
+ * How communications on this socket are treated.
+ * Usually it's determined by the underlying netlabel code
+ * but there are certain cases, including single label hosts
+ * and potentially single label interfaces for which the
+ * treatment cannot be known in advance.
+ *
+ * The possibility of additional labeling schemes being
+ * introduced in the future exists as well.
+ */
+#define SMACK_UNLABELED_SOCKET	0
+#define SMACK_CIPSO_SOCKET	1
+
+/*
+ * CIPSO defaults.
+ */
+#define SMACK_CIPSO_DOI_DEFAULT		3	/* Historical */
+#define SMACK_CIPSO_DOI_INVALID		-1	/* Not a DOI */
+#define SMACK_CIPSO_DIRECT_DEFAULT	250	/* Arbitrary */
+#define SMACK_CIPSO_MAPPED_DEFAULT	251	/* Also arbitrary */
+#define SMACK_CIPSO_MAXLEVEL            255     /* CIPSO 2.2 standard */
+/*
+ * CIPSO 2.2 standard is 239, but Smack wants to use the
+ * categories in a structured way that limits the value to
+ * the bits in 23 bytes, hence the unusual number.
+ */
+#define SMACK_CIPSO_MAXCATNUM           184     /* 23 * 8 */
+
+/*
+ * Ptrace rules
+ */
+#define SMACK_PTRACE_DEFAULT	0
+#define SMACK_PTRACE_EXACT	1
+#define SMACK_PTRACE_DRACONIAN	2
+#define SMACK_PTRACE_MAX	SMACK_PTRACE_DRACONIAN
+
+/*
+ * Flags for untraditional access modes.
+ * It shouldn't be necessary to avoid conflicts with definitions
+ * in fs.h, but do so anyway.
+ */
+#define MAY_TRANSMUTE	0x00001000	/* Controls directory labeling */
+#define MAY_LOCK	0x00002000	/* Locks should be writes, but ... */
+#define MAY_BRINGUP	0x00004000	/* Report use of this rule */
+
+#define SMACK_BRINGUP_ALLOW		1	/* Allow bringup mode */
+#define SMACK_UNCONFINED_SUBJECT	2	/* Allow unconfined label */
+#define SMACK_UNCONFINED_OBJECT		3	/* Allow unconfined label */
+
+/*
+ * Just to make the common cases easier to deal with
+ */
+#define MAY_ANYREAD	(MAY_READ | MAY_EXEC)
+#define MAY_READWRITE	(MAY_READ | MAY_WRITE)
+#define MAY_NOT		0
+
+/*
+ * Number of access types used by Smack (rwxatlb)
+ */
+#define SMK_NUM_ACCESS_TYPE 7
+
+/* SMACK data */
+struct smack_audit_data {
+	const char *function;
+	char *subject;
+	char *object;
+	char *request;
+	int result;
+};
+
+/*
+ * Smack audit data; empty if CONFIG_AUDIT is not set,
+ * to save some stack space.
+ */
+struct smk_audit_info {
+#ifdef CONFIG_AUDIT
+	struct common_audit_data a;
+	struct smack_audit_data sad;
+#endif
+};
+
+/*
+ * These functions are in smack_access.c
+ */
+int smk_access_entry(char *, char *, struct list_head *);
+int smk_access(struct smack_known *, struct smack_known *,
+	       int, struct smk_audit_info *);
+int smk_tskacc(struct task_smack *, struct smack_known *,
+	       u32, struct smk_audit_info *);
+int smk_curacc(struct smack_known *, u32, struct smk_audit_info *);
+struct smack_known *smack_from_secid(const u32);
+char *smk_parse_smack(const char *string, int len);
+int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int);
+struct smack_known *smk_import_entry(const char *, int);
+void smk_insert_entry(struct smack_known *skp);
+struct smack_known *smk_find_entry(const char *);
+int smack_privileged(int cap);
+void smk_destroy_label_list(struct list_head *list);
+
+/*
+ * Shared data.
+ */
+extern int smack_enabled;
+extern int smack_cipso_direct;
+extern int smack_cipso_mapped;
+extern struct smack_known *smack_net_ambient;
+extern struct smack_known *smack_syslog_label;
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+extern struct smack_known *smack_unconfined;
+#endif
+extern int smack_ptrace_rule;
+
+extern struct smack_known smack_known_floor;
+extern struct smack_known smack_known_hat;
+extern struct smack_known smack_known_huh;
+extern struct smack_known smack_known_invalid;
+extern struct smack_known smack_known_star;
+extern struct smack_known smack_known_web;
+
+extern struct mutex	smack_known_lock;
+extern struct list_head smack_known_list;
+extern struct list_head smk_net4addr_list;
+#if IS_ENABLED(CONFIG_IPV6)
+extern struct list_head smk_net6addr_list;
+#endif /* CONFIG_IPV6 */
+
+extern struct mutex     smack_onlycap_lock;
+extern struct list_head smack_onlycap_list;
+
+#define SMACK_HASH_SLOTS 16
+extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+
+/*
+ * Is the directory transmuting?
+ */
+static inline int smk_inode_transmutable(const struct inode *isp)
+{
+	struct inode_smack *sip = isp->i_security;
+	return (sip->smk_flags & SMK_INODE_TRANSMUTE) != 0;
+}
+
+/*
+ * Present a pointer to the smack label entry in an inode blob.
+ */
+static inline struct smack_known *smk_of_inode(const struct inode *isp)
+{
+	struct inode_smack *sip = isp->i_security;
+	return sip->smk_inode;
+}
+
+/*
+ * Present a pointer to the smack label entry in a task blob.
+ */
+static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
+{
+	return tsp->smk_task;
+}
+
+static inline struct smack_known *smk_of_task_struct(const struct task_struct *t)
+{
+	struct smack_known *skp;
+
+	rcu_read_lock();
+	skp = smk_of_task(__task_cred(t)->security);
+	rcu_read_unlock();
+	return skp;
+}
+
+/*
+ * Present a pointer to the forked smack label entry in a task blob.
+ */
+static inline struct smack_known *smk_of_forked(const struct task_smack *tsp)
+{
+	return tsp->smk_forked;
+}
+
+/*
+ * Present a pointer to the smack label in the current task blob.
+ */
+static inline struct smack_known *smk_of_current(void)
+{
+	return smk_of_task(current_security());
+}
+
+/*
+ * logging functions
+ */
+#define SMACK_AUDIT_DENIED 0x1
+#define SMACK_AUDIT_ACCEPT 0x2
+extern int log_policy;
+
+void smack_log(char *subject_label, char *object_label,
+		int request,
+		int result, struct smk_audit_info *auditdata);
+
+#ifdef CONFIG_AUDIT
+
+/*
+ * Some inline functions to set up audit data;
+ * they do nothing if CONFIG_AUDIT is not set.
+ */
+static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
+			       char type)
+{
+	memset(&a->sad, 0, sizeof(a->sad));
+	a->a.type = type;
+	a->a.smack_audit_data = &a->sad;
+	a->a.smack_audit_data->function = func;
+}
+
+static inline void smk_ad_init_net(struct smk_audit_info *a, const char *func,
+				   char type, struct lsm_network_audit *net)
+{
+	smk_ad_init(a, func, type);
+	memset(net, 0, sizeof(*net));
+	a->a.u.net = net;
+}
+
+static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
+					 struct task_struct *t)
+{
+	a->a.u.tsk = t;
+}
+static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a,
+						    struct dentry *d)
+{
+	a->a.u.dentry = d;
+}
+static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a,
+					      struct inode *i)
+{
+	a->a.u.inode = i;
+}
+static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a,
+					     struct path p)
+{
+	a->a.u.path = p;
+}
+static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
+					    struct sock *sk)
+{
+	a->a.u.net->sk = sk;
+}
+
+#else /* no AUDIT */
+
+static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
+			       char type)
+{
+}
+static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
+					 struct task_struct *t)
+{
+}
+static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a,
+						    struct dentry *d)
+{
+}
+static inline void smk_ad_setfield_u_fs_path_mnt(struct smk_audit_info *a,
+						 struct vfsmount *m)
+{
+}
+static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a,
+					      struct inode *i)
+{
+}
+static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a,
+					     struct path p)
+{
+}
+static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
+					    struct sock *sk)
+{
+}
+#endif
+
+#endif  /* _SECURITY_SMACK_H */
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
new file mode 100644
index 0000000..a283f9e
--- /dev/null
+++ b/security/smack/smack_access.c
@@ -0,0 +1,666 @@
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ *      This program is free software; you can redistribute it and/or modify
+ *      it under the terms of the GNU General Public License as published by
+ *      the Free Software Foundation, version 2.
+ *
+ * Author:
+ *      Casey Schaufler <casey@schaufler-ca.com>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include "smack.h"
+
+struct smack_known smack_known_huh = {
+	.smk_known	= "?",
+	.smk_secid	= 2,
+};
+
+struct smack_known smack_known_hat = {
+	.smk_known	= "^",
+	.smk_secid	= 3,
+};
+
+struct smack_known smack_known_star = {
+	.smk_known	= "*",
+	.smk_secid	= 4,
+};
+
+struct smack_known smack_known_floor = {
+	.smk_known	= "_",
+	.smk_secid	= 5,
+};
+
+struct smack_known smack_known_invalid = {
+	.smk_known	= "",
+	.smk_secid	= 6,
+};
+
+struct smack_known smack_known_web = {
+	.smk_known	= "@",
+	.smk_secid	= 7,
+};
+
+LIST_HEAD(smack_known_list);
+
+/*
+ * The initial value needs to be bigger than any of the
+ * known values above.
+ */
+static u32 smack_next_secid = 10;
+
+/*
+ * What events do we log?
+ * Can be overridden at run-time via /smack/logging.
+ */
+int log_policy = SMACK_AUDIT_DENIED;
+
+/**
+ * smk_access_entry - look up matching access rule
+ * @subject_label: a pointer to the subject's Smack label
+ * @object_label: a pointer to the object's Smack label
+ * @rule_list: the list of rules to search
+ *
+ * This function looks up the subject/object pair in the
+ * access rule list and returns the access mode. If no
+ * entry is found, -ENOENT is returned.
+ *
+ * NOTE:
+ *
+ * Earlier versions of this function allowed for labels that
+ * were not on the label list. This was done to allow for
+ * labels to come over the network that had never been seen
+ * before on this host. Unless the receiving socket has the
+ * star label this will always result in a failure check. The
+ * star labeled socket case is now handled in the networking
+ * hooks so there is no case where the label is not on the
+ * label list. Checking to see if the address of two labels
+ * is the same is now a reliable test.
+ *
+ * Do the object check first because that is more
+ * likely to differ.
+ *
+ * Allowing write access implies allowing locking.
+ */
+int smk_access_entry(char *subject_label, char *object_label,
+			struct list_head *rule_list)
+{
+	int may = -ENOENT;
+	struct smack_rule *srp;
+
+	list_for_each_entry_rcu(srp, rule_list, list) {
+		if (srp->smk_object->smk_known == object_label &&
+		    srp->smk_subject->smk_known == subject_label) {
+			may = srp->smk_access;
+			break;
+		}
+	}
+
+	/*
+	 * MAY_WRITE implies MAY_LOCK.
+	 */
+	if ((may & MAY_WRITE) == MAY_WRITE)
+		may |= MAY_LOCK;
+	return may;
+}
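+
+/*
+ * Example (hypothetical labels): with a rule "A B rw" loaded on
+ * subject A's rule list, a lookup for subject A and object B
+ * returns MAY_READ | MAY_WRITE | MAY_LOCK, the lock bit being
+ * implied by the write bit as noted above.
+ */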
+
+/**
+ * smk_access - determine if a subject has a specific access to an object
+ * @subject: a pointer to the subject's Smack label entry
+ * @object: a pointer to the object's Smack label entry
+ * @request: the access requested, in "MAY" format
+ * @a: a pointer to the audit data
+ *
+ * This function looks up the subject/object pair in the
+ * access rule list and returns 0 if the access is permitted,
+ * non zero otherwise.
+ *
+ * Smack labels are shared on smack_known_list
+ */
+int smk_access(struct smack_known *subject, struct smack_known *object,
+	       int request, struct smk_audit_info *a)
+{
+	int may = MAY_NOT;
+	int rc = 0;
+
+	/*
+	 * Hardcoded comparisons.
+	 */
+	/*
+	 * A star subject can't access any object.
+	 */
+	if (subject == &smack_known_star) {
+		rc = -EACCES;
+		goto out_audit;
+	}
+	/*
+	 * An internet object can be accessed by any subject.
+	 * Tasks cannot be assigned the internet label.
+	 * An internet subject can access any object.
+	 */
+	if (object == &smack_known_web || subject == &smack_known_web)
+		goto out_audit;
+	/*
+	 * A star object can be accessed by any subject.
+	 */
+	if (object == &smack_known_star)
+		goto out_audit;
+	/*
+	 * An object can be accessed in any way by a subject
+	 * with the same label.
+	 */
+	if (subject->smk_known == object->smk_known)
+		goto out_audit;
+	/*
+	 * A hat subject can read or lock any object.
+	 * A floor object can be read or locked by any subject.
+	 */
+	if ((request & MAY_ANYREAD) == request ||
+	    (request & MAY_LOCK) == request) {
+		if (object == &smack_known_floor)
+			goto out_audit;
+		if (subject == &smack_known_hat)
+			goto out_audit;
+	}
+	/*
+	 * Beyond here an explicit relationship is required.
+	 * If the requested access is contained in the available
+	 * access (e.g. read is included in readwrite) it's
+	 * good. A negative response from smk_access_entry()
+	 * indicates there is no entry for this pair.
+	 */
+	rcu_read_lock();
+	may = smk_access_entry(subject->smk_known, object->smk_known,
+			       &subject->smk_rules);
+	rcu_read_unlock();
+
+	if (may <= 0 || (request & may) != request) {
+		rc = -EACCES;
+		goto out_audit;
+	}
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+	/*
+	 * Return a positive value if using bringup mode.
+	 * This allows the hooks to identify checks that
+	 * succeed because of "b" rules.
+	 */
+	if (may & MAY_BRINGUP)
+		rc = SMACK_BRINGUP_ALLOW;
+#endif
+
+out_audit:
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+	if (rc < 0) {
+		if (object == smack_unconfined)
+			rc = SMACK_UNCONFINED_OBJECT;
+		if (subject == smack_unconfined)
+			rc = SMACK_UNCONFINED_SUBJECT;
+	}
+#endif
+
+#ifdef CONFIG_AUDIT
+	if (a)
+		smack_log(subject->smk_known, object->smk_known,
+			  request, rc, a);
+#endif
+
+	return rc;
+}
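+
+/*
+ * Examples of the hardcoded comparisons above: a subject labeled
+ * "*" is denied all access, an object labeled "_" (floor) may be
+ * read or locked by any subject, and matching labels grant every
+ * access mode.
+ */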
+
+/**
+ * smk_tskacc - determine if a task has a specific access to an object
+ * @tsp: a pointer to the subject's task
+ * @obj_known: a pointer to the object's label entry
+ * @mode: the access requested, in "MAY" format
+ * @a: common audit data
+ *
+ * This function checks the subject task's label/object label pair
+ * in the access rule list and returns 0 if the access is permitted,
+ * non zero otherwise. It allows that the task may have the capability
+ * to override the rules.
+ */
+int smk_tskacc(struct task_smack *tsp, struct smack_known *obj_known,
+	       u32 mode, struct smk_audit_info *a)
+{
+	struct smack_known *sbj_known = smk_of_task(tsp);
+	int may;
+	int rc;
+
+	/*
+	 * Check the global rule list
+	 */
+	rc = smk_access(sbj_known, obj_known, mode, NULL);
+	if (rc >= 0) {
+		/*
+		 * If there is an entry in the task's rule list
+		 * it can further restrict access.
+		 */
+		may = smk_access_entry(sbj_known->smk_known,
+				       obj_known->smk_known,
+				       &tsp->smk_rules);
+		if (may < 0)
+			goto out_audit;
+		if ((mode & may) == mode)
+			goto out_audit;
+		rc = -EACCES;
+	}
+
+	/*
+	 * Allow privileged tasks to override policy.
+	 */
+	if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE))
+		rc = 0;
+
+out_audit:
+#ifdef CONFIG_AUDIT
+	if (a)
+		smack_log(sbj_known->smk_known, obj_known->smk_known,
+			  mode, rc, a);
+#endif
+	return rc;
+}
+
+/**
+ * smk_curacc - determine if current has a specific access to an object
+ * @obj_known: a pointer to the object's Smack label entry
+ * @mode: the access requested, in "MAY" format
+ * @a: common audit data
+ *
+ * This function checks the current subject label/object label pair
+ * in the access rule list and returns 0 if the access is permitted,
+ * non zero otherwise. It allows that current may have the capability
+ * to override the rules.
+ */
+int smk_curacc(struct smack_known *obj_known,
+	       u32 mode, struct smk_audit_info *a)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_tskacc(tsp, obj_known, mode, a);
+}
+
+#ifdef CONFIG_AUDIT
+/**
+ * smack_str_from_perm - helper to translate an access mode
+ * into a readable string
+ * @string: the string to fill
+ * @access: the access mode, in "MAY" format
+ */
+static inline void smack_str_from_perm(char *string, int access)
+{
+	int i = 0;
+
+	if (access & MAY_READ)
+		string[i++] = 'r';
+	if (access & MAY_WRITE)
+		string[i++] = 'w';
+	if (access & MAY_EXEC)
+		string[i++] = 'x';
+	if (access & MAY_APPEND)
+		string[i++] = 'a';
+	if (access & MAY_TRANSMUTE)
+		string[i++] = 't';
+	if (access & MAY_LOCK)
+		string[i++] = 'l';
+	string[i] = '\0';
+}
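+
+/*
+ * Example: an access of (MAY_READ | MAY_WRITE | MAY_LOCK) is
+ * rendered as "rwl".
+ */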
+
+/**
+ * smack_log_callback - add SMACK specific information to an
+ * audit record; called by the generic audit code
+ * @ab: the audit_buffer
+ * @a: audit_data
+ */
+static void smack_log_callback(struct audit_buffer *ab, void *a)
+{
+	struct common_audit_data *ad = a;
+	struct smack_audit_data *sad = ad->smack_audit_data;
+	audit_log_format(ab, "lsm=SMACK fn=%s action=%s",
+			 ad->smack_audit_data->function,
+			 sad->result ? "denied" : "granted");
+	audit_log_format(ab, " subject=");
+	audit_log_untrustedstring(ab, sad->subject);
+	audit_log_format(ab, " object=");
+	audit_log_untrustedstring(ab, sad->object);
+	if (sad->request[0] == '\0')
+		audit_log_format(ab, " labels_differ");
+	else
+		audit_log_format(ab, " requested=%s", sad->request);
+}
+
+/**
+ * smack_log - Audit the granting or denial of permissions.
+ * @subject_label: smack label of the requester
+ * @object_label: smack label of the object being accessed
+ * @request: requested permissions
+ * @result: result from smk_access
+ * @ad: auxiliary audit data
+ *
+ * Audit the granting or denial of permissions in accordance
+ * with the policy.
+ */
+void smack_log(char *subject_label, char *object_label, int request,
+	       int result, struct smk_audit_info *ad)
+{
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+	char request_buffer[SMK_NUM_ACCESS_TYPE + 5];
+#else
+	char request_buffer[SMK_NUM_ACCESS_TYPE + 1];
+#endif
+	struct smack_audit_data *sad;
+	struct common_audit_data *a = &ad->a;
+
+	/* check if we have to log the current event */
+	if (result < 0 && (log_policy & SMACK_AUDIT_DENIED) == 0)
+		return;
+	if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
+		return;
+
+	sad = a->smack_audit_data;
+
+	if (sad->function == NULL)
+		sad->function = "unknown";
+
+	/* end preparing the audit data */
+	smack_str_from_perm(request_buffer, request);
+	sad->subject = subject_label;
+	sad->object  = object_label;
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+	/*
+	 * The result may be positive in bringup mode.
+	 * A positive result is an allow, but not for normal reasons.
+	 * Mark it as successful, but don't filter it out even if
+	 * the logging policy says to do so.
+	 */
+	if (result == SMACK_UNCONFINED_SUBJECT)
+		strcat(request_buffer, "(US)");
+	else if (result == SMACK_UNCONFINED_OBJECT)
+		strcat(request_buffer, "(UO)");
+
+	if (result > 0)
+		result = 0;
+#endif
+	sad->request = request_buffer;
+	sad->result  = result;
+
+	common_lsm_audit(a, smack_log_callback, NULL);
+}
+#else /* #ifdef CONFIG_AUDIT */
+void smack_log(char *subject_label, char *object_label, int request,
+               int result, struct smk_audit_info *ad)
+{
+}
+#endif
+
+DEFINE_MUTEX(smack_known_lock);
+
+struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+
+/**
+ * smk_insert_entry - insert a smack label into the hash table
+ * @skp: the label entry to insert
+ *
+ * This function must be called with smack_known_lock held.
+ */
+void smk_insert_entry(struct smack_known *skp)
+{
+	unsigned int hash;
+	struct hlist_head *head;
+
+	hash = full_name_hash(skp->smk_known, strlen(skp->smk_known));
+	head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];
+
+	hlist_add_head_rcu(&skp->smk_hashed, head);
+	list_add_rcu(&skp->list, &smack_known_list);
+}
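+
+/*
+ * Example: the label string hashes through full_name_hash() and
+ * the low four bits select one of the SMACK_HASH_SLOTS (16)
+ * buckets.
+ */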
+
+/**
+ * smk_find_entry - find a label on the list, return the list entry
+ * @string: a text string that might be a Smack label
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string or NULL if not found.
+ */
+struct smack_known *smk_find_entry(const char *string)
+{
+	unsigned int hash;
+	struct hlist_head *head;
+	struct smack_known *skp;
+
+	hash = full_name_hash(string, strlen(string));
+	head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];
+
+	hlist_for_each_entry_rcu(skp, head, smk_hashed)
+		if (strcmp(skp->smk_known, string) == 0)
+			return skp;
+
+	return NULL;
+}
+
+/**
+ * smk_parse_smack - parse smack label from a text string
+ * @string: a text string that might contain a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ *
+ * Returns a pointer to the clean label or an error code.
+ */
+char *smk_parse_smack(const char *string, int len)
+{
+	char *smack;
+	int i;
+
+	if (len <= 0)
+		len = strlen(string) + 1;
+
+	/*
+	 * Reserve a leading '-' as an indicator that
+	 * this isn't a label, but an option to interfaces
+	 * including /smack/cipso and /smack/cipso2
+	 */
+	if (string[0] == '-')
+		return ERR_PTR(-EINVAL);
+
+	for (i = 0; i < len; i++)
+		if (string[i] > '~' || string[i] <= ' ' || string[i] == '/' ||
+		    string[i] == '"' || string[i] == '\\' || string[i] == '\'')
+			break;
+
+	if (i == 0 || i >= SMK_LONGLABEL)
+		return ERR_PTR(-EINVAL);
+
+	smack = kzalloc(i + 1, GFP_KERNEL);
+	if (smack == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	strncpy(smack, string, i);
+
+	return smack;
+}
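+
+/*
+ * Example: smk_parse_smack("Pop ", 0) returns a freshly allocated
+ * "Pop", the label ending at the first disallowed character, while
+ * smk_parse_smack("-CIPSO", 0) returns ERR_PTR(-EINVAL) because of
+ * the reserved leading '-'.
+ */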
+
+/**
+ * smk_netlbl_mls - convert a catset to netlabel mls categories
+ * @level: the sensitivity level
+ * @catset: the Smack categories
+ * @sap: where to put the netlabel categories
+ * @len: number of bytes in the catset
+ *
+ * Allocates and fills attr.mls
+ * Returns 0 on success, error code on failure.
+ */
+int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
+			int len)
+{
+	unsigned char *cp;
+	unsigned char m;
+	int cat;
+	int rc;
+	int byte;
+
+	sap->flags |= NETLBL_SECATTR_MLS_CAT;
+	sap->attr.mls.lvl = level;
+	sap->attr.mls.cat = NULL;
+
+	for (cat = 1, cp = catset, byte = 0; byte < len; cp++, byte++)
+		for (m = 0x80; m != 0; m >>= 1, cat++) {
+			if ((m & *cp) == 0)
+				continue;
+			rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
+						  cat, GFP_ATOMIC);
+			if (rc < 0) {
+				netlbl_catmap_free(sap->attr.mls.cat);
+				return rc;
+			}
+		}
+
+	return 0;
+}
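+
+/*
+ * Example: a single catset byte of 0xc0 sets category bits 1 and 2
+ * in sap->attr.mls.cat, the high-order bit of each byte mapping to
+ * the lowest-numbered category covered by that byte.
+ */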
+
+/**
+ * smk_import_entry - import a label, return the list entry
+ * @string: a text string that might be a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string, adding it if necessary,
+ * or an error code.
+ */
+struct smack_known *smk_import_entry(const char *string, int len)
+{
+	struct smack_known *skp;
+	char *smack;
+	int slen;
+	int rc;
+
+	smack = smk_parse_smack(string, len);
+	if (IS_ERR(smack))
+		return ERR_CAST(smack);
+
+	mutex_lock(&smack_known_lock);
+
+	skp = smk_find_entry(smack);
+	if (skp != NULL)
+		goto freeout;
+
+	skp = kzalloc(sizeof(*skp), GFP_KERNEL);
+	if (skp == NULL) {
+		skp = ERR_PTR(-ENOMEM);
+		goto freeout;
+	}
+
+	skp->smk_known = smack;
+	skp->smk_secid = smack_next_secid++;
+	skp->smk_netlabel.domain = skp->smk_known;
+	skp->smk_netlabel.flags =
+		NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
+	/*
+	 * If direct labeling works use it.
+	 * Otherwise use mapped labeling.
+	 */
+	slen = strlen(smack);
+	if (slen < SMK_CIPSOLEN)
+		rc = smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
+			       &skp->smk_netlabel, slen);
+	else
+		rc = smk_netlbl_mls(smack_cipso_mapped, (char *)&skp->smk_secid,
+			       &skp->smk_netlabel, sizeof(skp->smk_secid));
+
+	if (rc >= 0) {
+		INIT_LIST_HEAD(&skp->smk_rules);
+		mutex_init(&skp->smk_rules_lock);
+		/*
+		 * Make sure that the entry is actually
+		 * filled before putting it on the list.
+		 */
+		smk_insert_entry(skp);
+		goto unlockout;
+	}
+	/*
+	 * smk_netlbl_mls failed.
+	 */
+	kfree(skp);
+	skp = ERR_PTR(rc);
+freeout:
+	kfree(smack);
+unlockout:
+	mutex_unlock(&smack_known_lock);
+
+	return skp;
+}
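+
+/*
+ * Example (hypothetical label): the first import of "Rubble", being
+ * shorter than SMK_CIPSOLEN, gets a fresh secid and direct CIPSO
+ * labeling; importing the same string again returns the existing
+ * entry.
+ */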
+
+/**
+ * smack_from_secid - find the Smack label associated with a secid
+ * @secid: an integer that might be associated with a Smack label
+ *
+ * Returns a pointer to the appropriate Smack label entry if there is one,
+ * otherwise a pointer to the invalid Smack label.
+ */
+struct smack_known *smack_from_secid(const u32 secid)
+{
+	struct smack_known *skp;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(skp, &smack_known_list, list) {
+		if (skp->smk_secid == secid) {
+			rcu_read_unlock();
+			return skp;
+		}
+	}
+
+	/*
+	 * If we got this far someone asked for the translation
+	 * of a secid that is not on the list.
+	 */
+	rcu_read_unlock();
+	return &smack_known_invalid;
+}
+
+/*
+ * Unless a process is running with one of these labels
+ * even having CAP_MAC_OVERRIDE isn't enough to grant
+ * privilege to violate MAC policy. If no labels are
+ * designated (the empty list case) capabilities apply to
+ * everyone.
+ */
+LIST_HEAD(smack_onlycap_list);
+DEFINE_MUTEX(smack_onlycap_lock);
+
+/*
+ * Is the task privileged and allowed to be privileged
+ * by the onlycap rule?
+ *
+ * Returns 1 if the task is allowed to be privileged, 0 if it's not.
+ */
+int smack_privileged(int cap)
+{
+	struct smack_known *skp = smk_of_current();
+	struct smack_known_list_elem *sklep;
+
+	/*
+	 * All kernel tasks are privileged
+	 */
+	if (unlikely(current->flags & PF_KTHREAD))
+		return 1;
+
+	if (!capable(cap))
+		return 0;
+
+	rcu_read_lock();
+	if (list_empty(&smack_onlycap_list)) {
+		rcu_read_unlock();
+		return 1;
+	}
+
+	list_for_each_entry_rcu(sklep, &smack_onlycap_list, list) {
+		if (sklep->smk_label == skp) {
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
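+
+/*
+ * Example (hypothetical label): with "System" on the onlycap list,
+ * CAP_MAC_ADMIN is honored only for tasks running with the "System"
+ * label; with an empty list the capability check alone decides.
+ */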
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
new file mode 100644
index 0000000..735a1a9
--- /dev/null
+++ b/security/smack/smack_lsm.c
@@ -0,0 +1,4806 @@
+/*
+ *  Simplified MAC Kernel (smack) security module
+ *
+ *  This file contains the smack hook function implementations.
+ *
+ *  Authors:
+ *	Casey Schaufler <casey@schaufler-ca.com>
+ *	Jarkko Sakkinen <jarkko.sakkinen@intel.com>
+ *
+ *  Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *  Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ *                Paul Moore <paul@paul-moore.com>
+ *  Copyright (C) 2010 Nokia Corporation
+ *  Copyright (C) 2011 Intel Corporation.
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License version 2,
+ *      as published by the Free Software Foundation.
+ */
+
+#include <linux/xattr.h>
+#include <linux/pagemap.h>
+#include <linux/mount.h>
+#include <linux/stat.h>
+#include <linux/kd.h>
+#include <asm/ioctls.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/dccp.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/pipe_fs_i.h>
+#include <net/cipso_ipv4.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/audit.h>
+#include <linux/magic.h>
+#include <linux/dcache.h>
+#include <linux/personality.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/binfmts.h>
+#include <linux/parser.h>
+#include "smack.h"
+
+#define TRANS_TRUE	"TRUE"
+#define TRANS_TRUE_SIZE	4
+
+#define SMK_CONNECTING	0
+#define SMK_RECEIVING	1
+#define SMK_SENDING	2
+
+#ifdef SMACK_IPV6_PORT_LABELING
+static LIST_HEAD(smk_ipv6_port_list);
+#endif
+static struct kmem_cache *smack_inode_cache;
+int smack_enabled;
+
+static const match_table_t smk_mount_tokens = {
+	{Opt_fsdefault, SMK_FSDEFAULT "%s"},
+	{Opt_fsfloor, SMK_FSFLOOR "%s"},
+	{Opt_fshat, SMK_FSHAT "%s"},
+	{Opt_fsroot, SMK_FSROOT "%s"},
+	{Opt_fstransmute, SMK_FSTRANS "%s"},
+	{Opt_error, NULL},
+};
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static char *smk_bu_mess[] = {
+	"Bringup Error",	/* Unused */
+	"Bringup",		/* SMACK_BRINGUP_ALLOW */
+	"Unconfined Subject",	/* SMACK_UNCONFINED_SUBJECT */
+	"Unconfined Object",	/* SMACK_UNCONFINED_OBJECT */
+};
+
+static void smk_bu_mode(int mode, char *s)
+{
+	int i = 0;
+
+	if (mode & MAY_READ)
+		s[i++] = 'r';
+	if (mode & MAY_WRITE)
+		s[i++] = 'w';
+	if (mode & MAY_EXEC)
+		s[i++] = 'x';
+	if (mode & MAY_APPEND)
+		s[i++] = 'a';
+	if (mode & MAY_TRANSMUTE)
+		s[i++] = 't';
+	if (mode & MAY_LOCK)
+		s[i++] = 'l';
+	if (i == 0)
+		s[i++] = '-';
+	s[i] = '\0';
+}
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_note(char *note, struct smack_known *sskp,
+		       struct smack_known *oskp, int mode, int rc)
+{
+	char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+	if (rc <= 0)
+		return rc;
+	if (rc > SMACK_UNCONFINED_OBJECT)
+		rc = 0;
+
+	smk_bu_mode(mode, acc);
+	pr_info("Smack %s: (%s %s %s) %s\n", smk_bu_mess[rc],
+		sskp->smk_known, oskp->smk_known, acc, note);
+	return 0;
+}
+#else
+#define smk_bu_note(note, sskp, oskp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_current(char *note, struct smack_known *oskp,
+			  int mode, int rc)
+{
+	struct task_smack *tsp = current_security();
+	char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+	if (rc <= 0)
+		return rc;
+	if (rc > SMACK_UNCONFINED_OBJECT)
+		rc = 0;
+
+	smk_bu_mode(mode, acc);
+	pr_info("Smack %s: (%s %s %s) %s %s\n", smk_bu_mess[rc],
+		tsp->smk_task->smk_known, oskp->smk_known,
+		acc, current->comm, note);
+	return 0;
+}
+#else
+#define smk_bu_current(note, oskp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_task(struct task_struct *otp, int mode, int rc)
+{
+	struct task_smack *tsp = current_security();
+	struct smack_known *smk_task = smk_of_task_struct(otp);
+	char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+	if (rc <= 0)
+		return rc;
+	if (rc > SMACK_UNCONFINED_OBJECT)
+		rc = 0;
+
+	smk_bu_mode(mode, acc);
+	pr_info("Smack %s: (%s %s %s) %s to %s\n", smk_bu_mess[rc],
+		tsp->smk_task->smk_known, smk_task->smk_known, acc,
+		current->comm, otp->comm);
+	return 0;
+}
+#else
+#define smk_bu_task(otp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_inode(struct inode *inode, int mode, int rc)
+{
+	struct task_smack *tsp = current_security();
+	struct inode_smack *isp = inode->i_security;
+	char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+	if (isp->smk_flags & SMK_INODE_IMPURE)
+		pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+			inode->i_sb->s_id, inode->i_ino, current->comm);
+
+	if (rc <= 0)
+		return rc;
+	if (rc > SMACK_UNCONFINED_OBJECT)
+		rc = 0;
+	if (rc == SMACK_UNCONFINED_SUBJECT &&
+	    (mode & (MAY_WRITE | MAY_APPEND)))
+		isp->smk_flags |= SMK_INODE_IMPURE;
+
+	smk_bu_mode(mode, acc);
+
+	pr_info("Smack %s: (%s %s %s) inode=(%s %ld) %s\n", smk_bu_mess[rc],
+		tsp->smk_task->smk_known, isp->smk_inode->smk_known, acc,
+		inode->i_sb->s_id, inode->i_ino, current->comm);
+	return 0;
+}
+#else
+#define smk_bu_inode(inode, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_file(struct file *file, int mode, int rc)
+{
+	struct task_smack *tsp = current_security();
+	struct smack_known *sskp = tsp->smk_task;
+	struct inode *inode = file_inode(file);
+	struct inode_smack *isp = inode->i_security;
+	char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+	if (isp->smk_flags & SMK_INODE_IMPURE)
+		pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+			inode->i_sb->s_id, inode->i_ino, current->comm);
+
+	if (rc <= 0)
+		return rc;
+	if (rc > SMACK_UNCONFINED_OBJECT)
+		rc = 0;
+
+	smk_bu_mode(mode, acc);
+	pr_info("Smack %s: (%s %s %s) file=(%s %ld %pD) %s\n", smk_bu_mess[rc],
+		sskp->smk_known, smk_of_inode(inode)->smk_known, acc,
+		inode->i_sb->s_id, inode->i_ino, file,
+		current->comm);
+	return 0;
+}
+#else
+#define smk_bu_file(file, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_credfile(const struct cred *cred, struct file *file,
+				int mode, int rc)
+{
+	struct task_smack *tsp = cred->security;
+	struct smack_known *sskp = tsp->smk_task;
+	struct inode *inode = file->f_inode;
+	struct inode_smack *isp = inode->i_security;
+	char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+	if (isp->smk_flags & SMK_INODE_IMPURE)
+		pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+			inode->i_sb->s_id, inode->i_ino, current->comm);
+
+	if (rc <= 0)
+		return rc;
+	if (rc > SMACK_UNCONFINED_OBJECT)
+		rc = 0;
+
+	smk_bu_mode(mode, acc);
+	pr_info("Smack %s: (%s %s %s) file=(%s %ld %pD) %s\n", smk_bu_mess[rc],
+		sskp->smk_known, smk_of_inode(inode)->smk_known, acc,
+		inode->i_sb->s_id, inode->i_ino, file,
+		current->comm);
+	return 0;
+}
+#else
+#define smk_bu_credfile(cred, file, mode, RC) (RC)
+#endif
+
+/**
+ * smk_fetch - Fetch the smack label from a file.
+ * @name: type of the label (attribute)
+ * @ip: a pointer to the inode
+ * @dp: a pointer to the dentry
+ *
+ * Returns a pointer to the master list entry for the Smack label,
+ * NULL if there was no label to fetch, or an error code.
+ */
+static struct smack_known *smk_fetch(const char *name, struct inode *ip,
+					struct dentry *dp)
+{
+	int rc;
+	char *buffer;
+	struct smack_known *skp = NULL;
+
+	if (ip->i_op->getxattr == NULL)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
+	if (buffer == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	rc = ip->i_op->getxattr(dp, name, buffer, SMK_LONGLABEL);
+	if (rc < 0)
+		skp = ERR_PTR(rc);
+	else if (rc == 0)
+		skp = NULL;
+	else
+		skp = smk_import_entry(buffer, rc);
+
+	kfree(buffer);
+
+	return skp;
+}
+
+/**
+ * new_inode_smack - allocate an inode security blob
+ * @skp: a pointer to the Smack label entry to use in the blob
+ *
+ * Returns the new blob or NULL if there's no memory available
+ */
+static struct inode_smack *new_inode_smack(struct smack_known *skp)
+{
+	struct inode_smack *isp;
+
+	isp = kmem_cache_zalloc(smack_inode_cache, GFP_NOFS);
+	if (isp == NULL)
+		return NULL;
+
+	isp->smk_inode = skp;
+	isp->smk_flags = 0;
+	mutex_init(&isp->smk_lock);
+
+	return isp;
+}
+
+/**
+ * new_task_smack - allocate a task security blob
+ * @task: a pointer to the Smack label for the running task
+ * @forked: a pointer to the Smack label for the forked task
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns the new blob or NULL if there's no memory available
+ */
+static struct task_smack *new_task_smack(struct smack_known *task,
+					struct smack_known *forked, gfp_t gfp)
+{
+	struct task_smack *tsp;
+
+	tsp = kzalloc(sizeof(struct task_smack), gfp);
+	if (tsp == NULL)
+		return NULL;
+
+	tsp->smk_task = task;
+	tsp->smk_forked = forked;
+	INIT_LIST_HEAD(&tsp->smk_rules);
+	INIT_LIST_HEAD(&tsp->smk_relabel);
+	mutex_init(&tsp->smk_rules_lock);
+
+	return tsp;
+}
+
+/**
+ * smk_copy_rules - copy a rule set
+ * @nhead: new rules header pointer
+ * @ohead: old rules header pointer
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns 0 on success, -ENOMEM on error
+ */
+static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
+				gfp_t gfp)
+{
+	struct smack_rule *nrp;
+	struct smack_rule *orp;
+	int rc = 0;
+
+	INIT_LIST_HEAD(nhead);
+
+	list_for_each_entry_rcu(orp, ohead, list) {
+		nrp = kzalloc(sizeof(struct smack_rule), gfp);
+		if (nrp == NULL) {
+			rc = -ENOMEM;
+			break;
+		}
+		*nrp = *orp;
+		list_add_rcu(&nrp->list, nhead);
+	}
+	return rc;
+}
+
+/**
+ * smk_copy_relabel - copy smk_relabel labels list
+ * @nhead: new rules header pointer
+ * @ohead: old rules header pointer
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns 0 on success, -ENOMEM on error
+ */
+static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
+				gfp_t gfp)
+{
+	struct smack_known_list_elem *nklep;
+	struct smack_known_list_elem *oklep;
+
+	INIT_LIST_HEAD(nhead);
+
+	list_for_each_entry(oklep, ohead, list) {
+		nklep = kzalloc(sizeof(struct smack_known_list_elem), gfp);
+		if (nklep == NULL) {
+			smk_destroy_label_list(nhead);
+			return -ENOMEM;
+		}
+		nklep->smk_label = oklep->smk_label;
+		list_add(&nklep->list, nhead);
+	}
+
+	return 0;
+}
+
+/**
+ * smk_ptrace_mode - helper function for converting PTRACE_MODE_* into MAY_*
+ * @mode: input mode in the form of PTRACE_MODE_*
+ *
+ * Returns a converted MAY_* mode usable by smack rules
+ */
+static inline unsigned int smk_ptrace_mode(unsigned int mode)
+{
+	if (mode & PTRACE_MODE_ATTACH)
+		return MAY_READWRITE;
+	if (mode & PTRACE_MODE_READ)
+		return MAY_READ;
+
+	return 0;
+}
+
+/**
+ * smk_ptrace_rule_check - helper for ptrace access
+ * @tracer: tracer process
+ * @tracee_known: label entry of the process that's about to be traced
+ * @mode: ptrace attachment mode (PTRACE_MODE_*)
+ * @func: name of the function that called us, used for audit
+ *
+ * Returns 0 on access granted, -error on error
+ */
+static int smk_ptrace_rule_check(struct task_struct *tracer,
+				 struct smack_known *tracee_known,
+				 unsigned int mode, const char *func)
+{
+	int rc;
+	struct smk_audit_info ad, *saip = NULL;
+	struct task_smack *tsp;
+	struct smack_known *tracer_known;
+
+	if ((mode & PTRACE_MODE_NOAUDIT) == 0) {
+		smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK);
+		smk_ad_setfield_u_tsk(&ad, tracer);
+		saip = &ad;
+	}
+
+	rcu_read_lock();
+	tsp = __task_cred(tracer)->security;
+	tracer_known = smk_of_task(tsp);
+
+	if ((mode & PTRACE_MODE_ATTACH) &&
+	    (smack_ptrace_rule == SMACK_PTRACE_EXACT ||
+	     smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)) {
+		if (tracer_known->smk_known == tracee_known->smk_known)
+			rc = 0;
+		else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)
+			rc = -EACCES;
+		else if (capable(CAP_SYS_PTRACE))
+			rc = 0;
+		else
+			rc = -EACCES;
+
+		if (saip)
+			smack_log(tracer_known->smk_known,
+				  tracee_known->smk_known,
+				  0, rc, saip);
+
+		rcu_read_unlock();
+		return rc;
+	}
+
+	/* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */
+	rc = smk_tskacc(tsp, tracee_known, smk_ptrace_mode(mode), saip);
+
+	rcu_read_unlock();
+	return rc;
+}
+
+/*
+ * LSM hooks.
+ * We he, that is fun!
+ */
+
+/**
+ * smack_ptrace_access_check - Smack approval on PTRACE_ATTACH
+ * @ctp: child task pointer
+ * @mode: ptrace attachment mode (PTRACE_MODE_*)
+ *
+ * Returns 0 if access is OK, an error code otherwise
+ *
+ * Do the capability checks.
+ */
+static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
+{
+	struct smack_known *skp;
+
+	skp = smk_of_task_struct(ctp);
+
+	return smk_ptrace_rule_check(current, skp, mode, __func__);
+}
+
+/**
+ * smack_ptrace_traceme - Smack approval on PTRACE_TRACEME
+ * @ptp: parent task pointer
+ *
+ * Returns 0 if access is OK, an error code otherwise
+ *
+ * Do the capability checks, and require PTRACE_MODE_ATTACH.
+ */
+static int smack_ptrace_traceme(struct task_struct *ptp)
+{
+	int rc;
+	struct smack_known *skp;
+
+	skp = smk_of_task(current_security());
+
+	rc = smk_ptrace_rule_check(ptp, skp, PTRACE_MODE_ATTACH, __func__);
+	return rc;
+}
+
+/**
+ * smack_syslog - Smack approval on syslog
+ * @typefrom_file: message type
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+static int smack_syslog(int typefrom_file)
+{
+	int rc = 0;
+	struct smack_known *skp = smk_of_current();
+
+	if (smack_privileged(CAP_MAC_OVERRIDE))
+		return 0;
+
+	if (smack_syslog_label != NULL && smack_syslog_label != skp)
+		rc = -EACCES;
+
+	return rc;
+}
+
+
+/*
+ * Superblock Hooks.
+ */
+
+/**
+ * smack_sb_alloc_security - allocate a superblock blob
+ * @sb: the superblock getting the blob
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ */
+static int smack_sb_alloc_security(struct super_block *sb)
+{
+	struct superblock_smack *sbsp;
+
+	sbsp = kzalloc(sizeof(struct superblock_smack), GFP_KERNEL);
+
+	if (sbsp == NULL)
+		return -ENOMEM;
+
+	sbsp->smk_root = &smack_known_floor;
+	sbsp->smk_default = &smack_known_floor;
+	sbsp->smk_floor = &smack_known_floor;
+	sbsp->smk_hat = &smack_known_hat;
+	/*
+	 * smk_initialized will be zero from kzalloc.
+	 */
+	sb->s_security = sbsp;
+
+	return 0;
+}
+
+/**
+ * smack_sb_free_security - free a superblock blob
+ * @sb: the superblock getting the blob
+ *
+ */
+static void smack_sb_free_security(struct super_block *sb)
+{
+	kfree(sb->s_security);
+	sb->s_security = NULL;
+}
+
+/**
+ * smack_sb_copy_data - copy mount options data for processing
+ * @orig: where to start
+ * @smackopts: mount options string
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ *
+ * Copy the Smack specific mount options out of the mount
+ * options list.
+ */
+static int smack_sb_copy_data(char *orig, char *smackopts)
+{
+	char *cp, *commap, *otheropts, *dp;
+
+	otheropts = (char *)get_zeroed_page(GFP_KERNEL);
+	if (otheropts == NULL)
+		return -ENOMEM;
+
+	for (cp = orig, commap = orig; commap != NULL; cp = commap + 1) {
+		if (strstr(cp, SMK_FSDEFAULT) == cp)
+			dp = smackopts;
+		else if (strstr(cp, SMK_FSFLOOR) == cp)
+			dp = smackopts;
+		else if (strstr(cp, SMK_FSHAT) == cp)
+			dp = smackopts;
+		else if (strstr(cp, SMK_FSROOT) == cp)
+			dp = smackopts;
+		else if (strstr(cp, SMK_FSTRANS) == cp)
+			dp = smackopts;
+		else
+			dp = otheropts;
+
+		commap = strchr(cp, ',');
+		if (commap != NULL)
+			*commap = '\0';
+
+		if (*dp != '\0')
+			strcat(dp, ",");
+		strcat(dp, cp);
+	}
+
+	strcpy(orig, otheropts);
+	free_page((unsigned long)otheropts);
+
+	return 0;
+}
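+
+/*
+ * Example (hypothetical mount string): given orig = "smackfsdef=foo,ro",
+ * "smackfsdef=foo" is copied to smackopts and orig is rewritten to
+ * just "ro".
+ */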
+
+/**
+ * smack_parse_opts_str - parse Smack specific mount options
+ * @options: mount options string
+ * @opts: where to store converted mount opts
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ *
+ * converts Smack specific mount options to generic security option format
+ */
+static int smack_parse_opts_str(char *options,
+		struct security_mnt_opts *opts)
+{
+	char *p;
+	char *fsdefault = NULL;
+	char *fsfloor = NULL;
+	char *fshat = NULL;
+	char *fsroot = NULL;
+	char *fstransmute = NULL;
+	int rc = -ENOMEM;
+	int num_mnt_opts = 0;
+	int token;
+
+	opts->num_mnt_opts = 0;
+
+	if (!options)
+		return 0;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, smk_mount_tokens, args);
+
+		switch (token) {
+		case Opt_fsdefault:
+			if (fsdefault)
+				goto out_opt_err;
+			fsdefault = match_strdup(&args[0]);
+			if (!fsdefault)
+				goto out_err;
+			break;
+		case Opt_fsfloor:
+			if (fsfloor)
+				goto out_opt_err;
+			fsfloor = match_strdup(&args[0]);
+			if (!fsfloor)
+				goto out_err;
+			break;
+		case Opt_fshat:
+			if (fshat)
+				goto out_opt_err;
+			fshat = match_strdup(&args[0]);
+			if (!fshat)
+				goto out_err;
+			break;
+		case Opt_fsroot:
+			if (fsroot)
+				goto out_opt_err;
+			fsroot = match_strdup(&args[0]);
+			if (!fsroot)
+				goto out_err;
+			break;
+		case Opt_fstransmute:
+			if (fstransmute)
+				goto out_opt_err;
+			fstransmute = match_strdup(&args[0]);
+			if (!fstransmute)
+				goto out_err;
+			break;
+		default:
+			rc = -EINVAL;
+			pr_warn("Smack:  unknown mount option\n");
+			goto out_err;
+		}
+	}
+
+	opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_ATOMIC);
+	if (!opts->mnt_opts)
+		goto out_err;
+
+	opts->mnt_opts_flags = kcalloc(NUM_SMK_MNT_OPTS, sizeof(int),
+			GFP_ATOMIC);
+	if (!opts->mnt_opts_flags) {
+		kfree(opts->mnt_opts);
+		goto out_err;
+	}
+
+	if (fsdefault) {
+		opts->mnt_opts[num_mnt_opts] = fsdefault;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSDEFAULT_MNT;
+	}
+	if (fsfloor) {
+		opts->mnt_opts[num_mnt_opts] = fsfloor;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSFLOOR_MNT;
+	}
+	if (fshat) {
+		opts->mnt_opts[num_mnt_opts] = fshat;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSHAT_MNT;
+	}
+	if (fsroot) {
+		opts->mnt_opts[num_mnt_opts] = fsroot;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSROOT_MNT;
+	}
+	if (fstransmute) {
+		opts->mnt_opts[num_mnt_opts] = fstransmute;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSTRANS_MNT;
+	}
+
+	opts->num_mnt_opts = num_mnt_opts;
+	return 0;
+
+out_opt_err:
+	rc = -EINVAL;
+	pr_warn("Smack: duplicate mount options\n");
+
+out_err:
+	kfree(fsdefault);
+	kfree(fsfloor);
+	kfree(fshat);
+	kfree(fsroot);
+	kfree(fstransmute);
+	return rc;
+}
+
+/**
+ * smack_set_mnt_opts - set Smack specific mount options
+ * @sb: the file system superblock
+ * @opts: Smack mount options
+ * @kern_flags: mount option from kernel space or user space
+ * @set_kern_flags: where to store converted mount opts
+ *
+ * Returns 0 on success, an error code on failure
+ *
+ * Allow filesystems with binary mount data to explicitly set Smack mount
+ * labels.
+ */
+static int smack_set_mnt_opts(struct super_block *sb,
+		struct security_mnt_opts *opts,
+		unsigned long kern_flags,
+		unsigned long *set_kern_flags)
+{
+	struct dentry *root = sb->s_root;
+	struct inode *inode = d_backing_inode(root);
+	struct superblock_smack *sp = sb->s_security;
+	struct inode_smack *isp;
+	struct smack_known *skp;
+	int i;
+	int num_opts = opts->num_mnt_opts;
+	int transmute = 0;
+
+	if (sp->smk_initialized)
+		return 0;
+
+	sp->smk_initialized = 1;
+
+	for (i = 0; i < num_opts; i++) {
+		switch (opts->mnt_opts_flags[i]) {
+		case FSDEFAULT_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
+			if (IS_ERR(skp))
+				return PTR_ERR(skp);
+			sp->smk_default = skp;
+			break;
+		case FSFLOOR_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
+			if (IS_ERR(skp))
+				return PTR_ERR(skp);
+			sp->smk_floor = skp;
+			break;
+		case FSHAT_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
+			if (IS_ERR(skp))
+				return PTR_ERR(skp);
+			sp->smk_hat = skp;
+			break;
+		case FSROOT_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
+			if (IS_ERR(skp))
+				return PTR_ERR(skp);
+			sp->smk_root = skp;
+			break;
+		case FSTRANS_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
+			if (IS_ERR(skp))
+				return PTR_ERR(skp);
+			sp->smk_root = skp;
+			transmute = 1;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!smack_privileged(CAP_MAC_ADMIN)) {
+		/*
+		 * Unprivileged mounts don't get to specify Smack values.
+		 */
+		if (num_opts)
+			return -EPERM;
+		/*
+		 * Unprivileged mounts get root and default from the caller.
+		 */
+		skp = smk_of_current();
+		sp->smk_root = skp;
+		sp->smk_default = skp;
+	}
+
+	/*
+	 * Initialize the root inode.
+	 */
+	isp = inode->i_security;
+	if (isp == NULL) {
+		isp = new_inode_smack(sp->smk_root);
+		if (isp == NULL)
+			return -ENOMEM;
+		inode->i_security = isp;
+	} else
+		isp->smk_inode = sp->smk_root;
+
+	if (transmute)
+		isp->smk_flags |= SMK_INODE_TRANSMUTE;
+
+	return 0;
+}
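+
+/*
+ * Usage sketch (illustrative): an unprivileged mounter, one without
+ * CAP_MAC_ADMIN, passing no Smack options gets both smk_root and
+ * smk_default set from smk_of_current() above; the same mounter
+ * passing any Smack option at all is refused with -EPERM.
+ */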
+
+/**
+ * smack_sb_kern_mount - Smack specific mount processing
+ * @sb: the file system superblock
+ * @flags: the mount flags
+ * @data: the smack mount options
+ *
+ * Returns 0 on success, an error code on failure
+ */
+static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
+{
+	int rc = 0;
+	char *options = data;
+	struct security_mnt_opts opts;
+
+	security_init_mnt_opts(&opts);
+
+	if (!options)
+		goto out;
+
+	rc = smack_parse_opts_str(options, &opts);
+	if (rc)
+		goto out_err;
+
+out:
+	rc = smack_set_mnt_opts(sb, &opts, 0, NULL);
+
+out_err:
+	security_free_mnt_opts(&opts);
+	return rc;
+}
+
+/**
+ * smack_sb_statfs - Smack check on statfs
+ * @dentry: identifies the file system in question
+ *
+ * Returns 0 if current can read the floor of the filesystem,
+ * and error code otherwise
+ */
+static int smack_sb_statfs(struct dentry *dentry)
+{
+	struct superblock_smack *sbp = dentry->d_sb->s_security;
+	int rc;
+	struct smk_audit_info ad;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+	rc = smk_curacc(sbp->smk_floor, MAY_READ, &ad);
+	rc = smk_bu_current("statfs", sbp->smk_floor, MAY_READ, rc);
+	return rc;
+}
+
+/*
+ * BPRM hooks
+ */
+
+/**
+ * smack_bprm_set_creds - set creds for exec
+ * @bprm: the exec information
+ *
+ * Returns 0 on success, -EPERM if exec is forbidden, or an error
+ * from the ptrace rule check otherwise
+ */
+static int smack_bprm_set_creds(struct linux_binprm *bprm)
+{
+	struct inode *inode = file_inode(bprm->file);
+	struct task_smack *bsp = bprm->cred->security;
+	struct inode_smack *isp;
+	int rc;
+
+	if (bprm->cred_prepared)
+		return 0;
+
+	isp = inode->i_security;
+	if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
+		return 0;
+
+	if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
+		struct task_struct *tracer;
+		rc = 0;
+
+		rcu_read_lock();
+		tracer = ptrace_parent(current);
+		if (likely(tracer != NULL))
+			rc = smk_ptrace_rule_check(tracer,
+						   isp->smk_task,
+						   PTRACE_MODE_ATTACH,
+						   __func__);
+		rcu_read_unlock();
+
+		if (rc != 0)
+			return rc;
+	} else if (bprm->unsafe)
+		return -EPERM;
+
+	bsp->smk_task = isp->smk_task;
+	bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+	return 0;
+}
+
+/**
+ * smack_bprm_committing_creds - Prepare to install the new credentials
+ * from bprm.
+ *
+ * @bprm: binprm for exec
+ */
+static void smack_bprm_committing_creds(struct linux_binprm *bprm)
+{
+	struct task_smack *bsp = bprm->cred->security;
+
+	if (bsp->smk_task != bsp->smk_forked)
+		current->pdeath_signal = 0;
+}
+
+/**
+ * smack_bprm_secureexec - Return the decision to use secureexec.
+ * @bprm: binprm for exec
+ *
+ * Returns 0 on success.
+ */
+static int smack_bprm_secureexec(struct linux_binprm *bprm)
+{
+	struct task_smack *tsp = current_security();
+
+	if (tsp->smk_task != tsp->smk_forked)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Inode hooks
+ */
+
+/**
+ * smack_inode_alloc_security - allocate an inode blob
+ * @inode: the inode in need of a blob
+ *
+ * Returns 0 if it gets a blob, -ENOMEM otherwise
+ */
+static int smack_inode_alloc_security(struct inode *inode)
+{
+	struct smack_known *skp = smk_of_current();
+
+	inode->i_security = new_inode_smack(skp);
+	if (inode->i_security == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * smack_inode_free_security - free an inode blob
+ * @inode: the inode with a blob
+ *
+ * Clears the blob pointer in inode
+ */
+static void smack_inode_free_security(struct inode *inode)
+{
+	kmem_cache_free(smack_inode_cache, inode->i_security);
+	inode->i_security = NULL;
+}
+
+/**
+ * smack_inode_init_security - copy out the smack from an inode
+ * @inode: the newly created inode
+ * @dir: containing directory object
+ * @qstr: unused
+ * @name: where to put the attribute name
+ * @value: where to put the attribute value
+ * @len: where to put the length of the attribute
+ *
+ * Returns 0 if it all works out, -ENOMEM if there's no memory
+ */
+static int smack_inode_init_security(struct inode *inode, struct inode *dir,
+				     const struct qstr *qstr, const char **name,
+				     void **value, size_t *len)
+{
+	struct inode_smack *issp = inode->i_security;
+	struct smack_known *skp = smk_of_current();
+	struct smack_known *isp = smk_of_inode(inode);
+	struct smack_known *dsp = smk_of_inode(dir);
+	int may;
+
+	if (name)
+		*name = XATTR_SMACK_SUFFIX;
+
+	if (value && len) {
+		rcu_read_lock();
+		may = smk_access_entry(skp->smk_known, dsp->smk_known,
+				       &skp->smk_rules);
+		rcu_read_unlock();
+
+		/*
+		 * If the access rule allows transmutation and
+		 * the directory requests transmutation then
+		 * by all means transmute.
+		 * Mark the inode as changed.
+		 */
+		if (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
+		    smk_inode_transmutable(dir)) {
+			isp = dsp;
+			issp->smk_flags |= SMK_INODE_CHANGED;
+		}
+
+		*value = kstrdup(isp->smk_known, GFP_NOFS);
+		if (*value == NULL)
+			return -ENOMEM;
+
+		*len = strlen(isp->smk_known);
+	}
+
+	return 0;
+}
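+
+/*
+ * Worked example (illustrative; rule and labels are hypothetical):
+ * given an access rule "Writer Dir rwxt" and a directory labeled
+ * "Dir" with SMACK64TRANSMUTE=TRUE set, a task labeled "Writer"
+ * creating a file there gets MAY_TRANSMUTE back from
+ * smk_access_entry(), so the new inode takes the directory's label
+ * "Dir" instead of "Writer" and is flagged SMK_INODE_CHANGED.
+ */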
+
+/**
+ * smack_inode_link - Smack check on link
+ * @old_dentry: the existing object
+ * @dir: unused
+ * @new_dentry: the new object
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
+			    struct dentry *new_dentry)
+{
+	struct smack_known *isp;
+	struct smk_audit_info ad;
+	int rc;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
+
+	isp = smk_of_inode(d_backing_inode(old_dentry));
+	rc = smk_curacc(isp, MAY_WRITE, &ad);
+	rc = smk_bu_inode(d_backing_inode(old_dentry), MAY_WRITE, rc);
+
+	if (rc == 0 && d_is_positive(new_dentry)) {
+		isp = smk_of_inode(d_backing_inode(new_dentry));
+		smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
+		rc = smk_curacc(isp, MAY_WRITE, &ad);
+		rc = smk_bu_inode(d_backing_inode(new_dentry), MAY_WRITE, rc);
+	}
+
+	return rc;
+}
+
+/**
+ * smack_inode_unlink - Smack check on inode deletion
+ * @dir: containing directory object
+ * @dentry: file to unlink
+ *
+ * Returns 0 if current can write the containing directory
+ * and the object, error code otherwise
+ */
+static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *ip = d_backing_inode(dentry);
+	struct smk_audit_info ad;
+	int rc;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+	/*
+	 * You need write access to the thing you're unlinking
+	 */
+	rc = smk_curacc(smk_of_inode(ip), MAY_WRITE, &ad);
+	rc = smk_bu_inode(ip, MAY_WRITE, rc);
+	if (rc == 0) {
+		/*
+		 * You also need write access to the containing directory
+		 */
+		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
+		smk_ad_setfield_u_fs_inode(&ad, dir);
+		rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
+		rc = smk_bu_inode(dir, MAY_WRITE, rc);
+	}
+	return rc;
+}
+
+/**
+ * smack_inode_rmdir - Smack check on directory deletion
+ * @dir: containing directory object
+ * @dentry: directory to unlink
+ *
+ * Returns 0 if current can write the containing directory
+ * and the directory, error code otherwise
+ */
+static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct smk_audit_info ad;
+	int rc;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+	/*
+	 * You need write access to the thing you're removing
+	 */
+	rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+	rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
+	if (rc == 0) {
+		/*
+		 * You also need write access to the containing directory
+		 */
+		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
+		smk_ad_setfield_u_fs_inode(&ad, dir);
+		rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
+		rc = smk_bu_inode(dir, MAY_WRITE, rc);
+	}
+
+	return rc;
+}
+
+/**
+ * smack_inode_rename - Smack check on rename
+ * @old_inode: unused
+ * @old_dentry: the old object
+ * @new_inode: unused
+ * @new_dentry: the new object
+ *
+ * Read and write access is required on both the old and
+ * new directories.
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_rename(struct inode *old_inode,
+			      struct dentry *old_dentry,
+			      struct inode *new_inode,
+			      struct dentry *new_dentry)
+{
+	int rc;
+	struct smack_known *isp;
+	struct smk_audit_info ad;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
+
+	isp = smk_of_inode(d_backing_inode(old_dentry));
+	rc = smk_curacc(isp, MAY_READWRITE, &ad);
+	rc = smk_bu_inode(d_backing_inode(old_dentry), MAY_READWRITE, rc);
+
+	if (rc == 0 && d_is_positive(new_dentry)) {
+		isp = smk_of_inode(d_backing_inode(new_dentry));
+		smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
+		rc = smk_curacc(isp, MAY_READWRITE, &ad);
+		rc = smk_bu_inode(d_backing_inode(new_dentry), MAY_READWRITE, rc);
+	}
+	return rc;
+}
+
+/**
+ * smack_inode_permission - Smack version of permission()
+ * @inode: the inode in question
+ * @mask: the access requested
+ *
+ * This is the important Smack hook.
+ *
+ * Returns 0 if access is permitted, -EACCES otherwise
+ */
+static int smack_inode_permission(struct inode *inode, int mask)
+{
+	struct smk_audit_info ad;
+	int no_block = mask & MAY_NOT_BLOCK;
+	int rc;
+
+	mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
+	/*
+	 * No permission to check. Existence test. Yup, it's there.
+	 */
+	if (mask == 0)
+		return 0;
+
+	/* May be droppable after audit */
+	if (no_block)
+		return -ECHILD;
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
+	smk_ad_setfield_u_fs_inode(&ad, inode);
+	rc = smk_curacc(smk_of_inode(inode), mask, &ad);
+	rc = smk_bu_inode(inode, mask, rc);
+	return rc;
+}
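+
+/*
+ * Worked example (illustrative): access(2) with F_OK reaches this
+ * hook with only MAY_ACCESS set, which the mask above strips, so the
+ * existence test passes without a Smack check. A lookup in RCU walk
+ * mode arrives with MAY_NOT_BLOCK and gets -ECHILD, making the VFS
+ * retry in ref-walk mode where the audit machinery below may block
+ * safely.
+ */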
+
+/**
+ * smack_inode_setattr - Smack check for setting attributes
+ * @dentry: the object
+ * @iattr: for the force flag
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	struct smk_audit_info ad;
+	int rc;
+
+	/*
+	 * Need to allow for clearing the setuid bit.
+	 */
+	if (iattr->ia_valid & ATTR_FORCE)
+		return 0;
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+	rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+	rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
+	return rc;
+}
+
+/**
+ * smack_inode_getattr - Smack check for getting attributes
+ * @path: path identifying the object
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_getattr(const struct path *path)
+{
+	struct smk_audit_info ad;
+	struct inode *inode = d_backing_inode(path->dentry);
+	int rc;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+	smk_ad_setfield_u_fs_path(&ad, *path);
+	rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad);
+	rc = smk_bu_inode(inode, MAY_READ, rc);
+	return rc;
+}
+
+/**
+ * smack_inode_setxattr - Smack check for setting xattrs
+ * @dentry: the object
+ * @name: name of the attribute
+ * @value: value of the attribute
+ * @size: size of the value
+ * @flags: unused
+ *
+ * This protects the Smack attribute explicitly.
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_setxattr(struct dentry *dentry, const char *name,
+				const void *value, size_t size, int flags)
+{
+	struct smk_audit_info ad;
+	struct smack_known *skp;
+	int check_priv = 0;
+	int check_import = 0;
+	int check_star = 0;
+	int rc = 0;
+
+	/*
+	 * Check label validity here so import won't fail in post_setxattr
+	 */
+	if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
+		check_priv = 1;
+		check_import = 1;
+	} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+		   strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+		check_priv = 1;
+		check_import = 1;
+		check_star = 1;
+	} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+		check_priv = 1;
+		if (size != TRANS_TRUE_SIZE ||
+		    strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
+			rc = -EINVAL;
+	} else
+		rc = cap_inode_setxattr(dentry, name, value, size, flags);
+
+	if (check_priv && !smack_privileged(CAP_MAC_ADMIN))
+		rc = -EPERM;
+
+	if (rc == 0 && check_import) {
+		skp = size ? smk_import_entry(value, size) : NULL;
+		if (IS_ERR(skp))
+			rc = PTR_ERR(skp);
+		else if (skp == NULL || (check_star &&
+		    (skp == &smack_known_star || skp == &smack_known_web)))
+			rc = -EINVAL;
+	}
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+	if (rc == 0) {
+		rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+		rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
+	}
+
+	return rc;
+}
+
+/**
+ * smack_inode_post_setxattr - Apply the Smack update approved above
+ * @dentry: object
+ * @name: attribute name
+ * @value: attribute value
+ * @size: attribute size
+ * @flags: unused
+ *
+ * Set the pointer in the inode blob to the entry found
+ * in the master label list.
+ */
+static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
+				      const void *value, size_t size, int flags)
+{
+	struct smack_known *skp;
+	struct inode_smack *isp = d_backing_inode(dentry)->i_security;
+
+	if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+		isp->smk_flags |= SMK_INODE_TRANSMUTE;
+		return;
+	}
+
+	if (strcmp(name, XATTR_NAME_SMACK) == 0) {
+		skp = smk_import_entry(value, size);
+		if (!IS_ERR(skp))
+			isp->smk_inode = skp;
+		else
+			isp->smk_inode = &smack_known_invalid;
+	} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
+		skp = smk_import_entry(value, size);
+		if (!IS_ERR(skp))
+			isp->smk_task = skp;
+		else
+			isp->smk_task = &smack_known_invalid;
+	} else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+		skp = smk_import_entry(value, size);
+		if (!IS_ERR(skp))
+			isp->smk_mmap = skp;
+		else
+			isp->smk_mmap = &smack_known_invalid;
+	}
+}
+
+/**
+ * smack_inode_getxattr - Smack check on getxattr
+ * @dentry: the object
+ * @name: unused
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_getxattr(struct dentry *dentry, const char *name)
+{
+	struct smk_audit_info ad;
+	int rc;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+	rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_READ, &ad);
+	rc = smk_bu_inode(d_backing_inode(dentry), MAY_READ, rc);
+	return rc;
+}
+
+/**
+ * smack_inode_removexattr - Smack check on removexattr
+ * @dentry: the object
+ * @name: name of the attribute
+ *
+ * Removing the Smack attribute requires CAP_MAC_ADMIN
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_removexattr(struct dentry *dentry, const char *name)
+{
+	struct inode_smack *isp;
+	struct smk_audit_info ad;
+	int rc = 0;
+
+	if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+		if (!smack_privileged(CAP_MAC_ADMIN))
+			rc = -EPERM;
+	} else
+		rc = cap_inode_removexattr(dentry, name);
+
+	if (rc != 0)
+		return rc;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+	rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+	rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
+	if (rc != 0)
+		return rc;
+
+	isp = d_backing_inode(dentry)->i_security;
+	/*
+	 * Don't do anything special for these.
+	 *	XATTR_NAME_SMACKIPIN
+	 *	XATTR_NAME_SMACKIPOUT
+	 *	XATTR_NAME_SMACKEXEC
+	 */
+	if (strcmp(name, XATTR_NAME_SMACK) == 0)
+		isp->smk_task = NULL;
+	else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0)
+		isp->smk_mmap = NULL;
+	else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
+		isp->smk_flags &= ~SMK_INODE_TRANSMUTE;
+
+	return 0;
+}
+
+/**
+ * smack_inode_getsecurity - get smack xattrs
+ * @inode: the object
+ * @name: attribute name
+ * @buffer: where to put the result
+ * @alloc: duplicate memory
+ *
+ * Returns the size of the attribute or an error code
+ */
+static int smack_inode_getsecurity(const struct inode *inode,
+				   const char *name, void **buffer,
+				   bool alloc)
+{
+	struct socket_smack *ssp;
+	struct socket *sock;
+	struct super_block *sbp;
+	struct inode *ip = (struct inode *)inode;
+	struct smack_known *isp;
+
+	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
+		isp = smk_of_inode(inode);
+	else {
+		/*
+		 * The rest of the Smack xattrs are only on sockets.
+		 */
+		sbp = ip->i_sb;
+		if (sbp->s_magic != SOCKFS_MAGIC)
+			return -EOPNOTSUPP;
+
+		sock = SOCKET_I(ip);
+		if (sock == NULL || sock->sk == NULL)
+			return -EOPNOTSUPP;
+
+		ssp = sock->sk->sk_security;
+
+		if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+			isp = ssp->smk_in;
+		else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+			isp = ssp->smk_out;
+		else
+			return -EOPNOTSUPP;
+	}
+
+	if (alloc) {
+		*buffer = kstrdup(isp->smk_known, GFP_KERNEL);
+		if (*buffer == NULL)
+			return -ENOMEM;
+	}
+
+	return strlen(isp->smk_known);
+}
+
+/**
+ * smack_inode_listsecurity - list the Smack attributes
+ * @inode: the object
+ * @buffer: where they go
+ * @buffer_size: size of buffer
+ *
+ * Returns 0 on success, -EINVAL otherwise
+ */
+static int smack_inode_listsecurity(struct inode *inode, char *buffer,
+				    size_t buffer_size)
+{
+	int len = sizeof(XATTR_NAME_SMACK);
+
+	if (buffer != NULL && len <= buffer_size)
+		memcpy(buffer, XATTR_NAME_SMACK, len);
+
+	return len;
+}
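+
+/*
+ * Usage sketch (illustrative): listxattr(2) typically calls in twice,
+ * first with a NULL buffer to learn the length, which is
+ * sizeof("security.SMACK64") including the trailing NUL, then with a
+ * real buffer. A buffer shorter than that length is left untouched
+ * while the required length is still returned.
+ */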
+
+/**
+ * smack_inode_getsecid - Extract inode's security id
+ * @inode: inode to extract the info from
+ * @secid: where result will be saved
+ */
+static void smack_inode_getsecid(const struct inode *inode, u32 *secid)
+{
+	struct inode_smack *isp = inode->i_security;
+
+	*secid = isp->smk_inode->smk_secid;
+}
+
+/*
+ * File Hooks
+ */
+
+/**
+ * smack_file_permission - Smack check on file operations
+ * @file: unused
+ * @mask: unused
+ *
+ * Returns 0
+ *
+ * Should access checks be done on each read or write?
+ * UNICOS and SELinux say yes.
+ * Trusted Solaris, Trusted Irix, and just about everyone else says no.
+ *
+ * I'll say no for now. Smack does not do the frequent
+ * label changing that SELinux does.
+ */
+static int smack_file_permission(struct file *file, int mask)
+{
+	return 0;
+}
+
+/**
+ * smack_file_alloc_security - assign a file security blob
+ * @file: the object
+ *
+ * The security blob for a file is a pointer to the master
+ * label list, so no allocation is done.
+ *
+ * f_security is the owner security information. It
+ * isn't used for file access checks; it's for send_sigio.
+ *
+ * Returns 0
+ */
+static int smack_file_alloc_security(struct file *file)
+{
+	struct smack_known *skp = smk_of_current();
+
+	file->f_security = skp;
+	return 0;
+}
+
+/**
+ * smack_file_free_security - clear a file security blob
+ * @file: the object
+ *
+ * The security blob for a file is a pointer to the master
+ * label list, so no memory is freed.
+ */
+static void smack_file_free_security(struct file *file)
+{
+	file->f_security = NULL;
+}
+
+/**
+ * smack_file_ioctl - Smack check on ioctls
+ * @file: the object
+ * @cmd: what to do
+ * @arg: unused
+ *
+ * Relies heavily on the correct use of the ioctl command conventions.
+ *
+ * Returns 0 if allowed, error code otherwise
+ */
+static int smack_file_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	int rc = 0;
+	struct smk_audit_info ad;
+	struct inode *inode = file_inode(file);
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+	smk_ad_setfield_u_fs_path(&ad, file->f_path);
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		rc = smk_curacc(smk_of_inode(inode), MAY_WRITE, &ad);
+		rc = smk_bu_file(file, MAY_WRITE, rc);
+	}
+
+	if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ)) {
+		rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad);
+		rc = smk_bu_file(file, MAY_READ, rc);
+	}
+
+	return rc;
+}
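+
+/*
+ * Worked example (illustrative; the command is hypothetical): a
+ * command defined as _IOR('x', 1, int) encodes _IOC_READ in its
+ * direction bits, so only the MAY_READ check above runs; one defined
+ * with _IOWR() carries both direction bits and must pass the
+ * MAY_WRITE check and then the MAY_READ check.
+ */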
+
+/**
+ * smack_file_lock - Smack check on file locking
+ * @file: the object
+ * @cmd: unused
+ *
+ * Returns 0 if current has lock access, error code otherwise
+ */
+static int smack_file_lock(struct file *file, unsigned int cmd)
+{
+	struct smk_audit_info ad;
+	int rc;
+	struct inode *inode = file_inode(file);
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+	smk_ad_setfield_u_fs_path(&ad, file->f_path);
+	rc = smk_curacc(smk_of_inode(inode), MAY_LOCK, &ad);
+	rc = smk_bu_file(file, MAY_LOCK, rc);
+	return rc;
+}
+
+/**
+ * smack_file_fcntl - Smack check on fcntl
+ * @file: the object
+ * @cmd: what action to check
+ * @arg: unused
+ *
+ * Generally these operations are harmless.
+ * File locking operations present an obvious mechanism
+ * for passing information, so they require write access.
+ *
+ * Returns 0 if current has access, error code otherwise
+ */
+static int smack_file_fcntl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	struct smk_audit_info ad;
+	int rc = 0;
+	struct inode *inode = file_inode(file);
+
+	switch (cmd) {
+	case F_GETLK:
+		break;
+	case F_SETLK:
+	case F_SETLKW:
+		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+		smk_ad_setfield_u_fs_path(&ad, file->f_path);
+		rc = smk_curacc(smk_of_inode(inode), MAY_LOCK, &ad);
+		rc = smk_bu_file(file, MAY_LOCK, rc);
+		break;
+	case F_SETOWN:
+	case F_SETSIG:
+		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+		smk_ad_setfield_u_fs_path(&ad, file->f_path);
+		rc = smk_curacc(smk_of_inode(inode), MAY_WRITE, &ad);
+		rc = smk_bu_file(file, MAY_WRITE, rc);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * smack_mmap_file - Check permissions for a mmap operation
+ * @file: the file to map, may be NULL for anonymous memory
+ * @reqprot: the protection requested by the application
+ * @prot: the protection that will be applied by the kernel
+ * @flags: the operational flags
+ *
+ * Returns 0 if permission is granted, an error code otherwise
+ */
+static int smack_mmap_file(struct file *file,
+			   unsigned long reqprot, unsigned long prot,
+			   unsigned long flags)
+{
+	struct smack_known *skp;
+	struct smack_known *mkp;
+	struct smack_rule *srp;
+	struct task_smack *tsp;
+	struct smack_known *okp;
+	struct inode_smack *isp;
+	int may;
+	int mmay;
+	int tmay;
+	int rc;
+
+	if (file == NULL)
+		return 0;
+
+	isp = file_inode(file)->i_security;
+	if (isp->smk_mmap == NULL)
+		return 0;
+	mkp = isp->smk_mmap;
+
+	tsp = current_security();
+	skp = smk_of_current();
+	rc = 0;
+
+	rcu_read_lock();
+	/*
+	 * For each Smack rule associated with the subject
+	 * label verify that the SMACK64MMAP also has access
+	 * to that rule's object label.
+	 */
+	list_for_each_entry_rcu(srp, &skp->smk_rules, list) {
+		okp = srp->smk_object;
+		/*
+		 * Matching labels always allows access.
+		 */
+		if (mkp->smk_known == okp->smk_known)
+			continue;
+		/*
+		 * If there is a matching local rule take
+		 * that into account as well.
+		 */
+		may = smk_access_entry(srp->smk_subject->smk_known,
+				       okp->smk_known,
+				       &tsp->smk_rules);
+		if (may == -ENOENT)
+			may = srp->smk_access;
+		else
+			may &= srp->smk_access;
+		/*
+		 * If may is zero the SMACK64MMAP subject can't
+		 * possibly have less access.
+		 */
+		if (may == 0)
+			continue;
+
+		/*
+		 * Fetch the global list entry.
+		 * If there isn't one a SMACK64MMAP subject
+		 * can't have as much access as current.
+		 */
+		mmay = smk_access_entry(mkp->smk_known, okp->smk_known,
+					&mkp->smk_rules);
+		if (mmay == -ENOENT) {
+			rc = -EACCES;
+			break;
+		}
+		/*
+		 * If there is a local entry it modifies the
+		 * potential access, too.
+		 */
+		tmay = smk_access_entry(mkp->smk_known, okp->smk_known,
+					&tsp->smk_rules);
+		if (tmay != -ENOENT)
+			mmay &= tmay;
+
+		/*
+		 * If there is any access available to current that is
+		 * not available to a SMACK64MMAP subject
+		 * deny access.
+		 */
+		if ((may | mmay) != mmay) {
+			rc = -EACCES;
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return rc;
+}
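+
+/*
+ * Worked example (illustrative; all labels and rules hypothetical):
+ * current runs as "App" with a global rule "App Data rw" and the
+ * file carries SMACK64MMAP="Lib". If the only rule for "Lib" is
+ * "Lib Data r", then for object "Data" may = rw and mmay = r, so
+ * (may | mmay) != mmay and the map is refused with -EACCES: current
+ * holds access (w to "Data") that the "Lib" label lacks.
+ */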
+
+/**
+ * smack_file_set_fowner - set the file security blob value
+ * @file: object in question
+ */
+static void smack_file_set_fowner(struct file *file)
+{
+	file->f_security = smk_of_current();
+}
+
+/**
+ * smack_file_send_sigiotask - Smack on sigio
+ * @tsk: The target task
+ * @fown: the object the signal comes from
+ * @signum: unused
+ *
+ * Allow a privileged task to get signals even if it shouldn't
+ *
+ * Returns 0 if a subject with the object's smack could
+ * write to the task, an error code otherwise.
+ */
+static int smack_file_send_sigiotask(struct task_struct *tsk,
+				     struct fown_struct *fown, int signum)
+{
+	struct smack_known *skp;
+	struct smack_known *tkp = smk_of_task(tsk->cred->security);
+	struct file *file;
+	int rc;
+	struct smk_audit_info ad;
+
+	/*
+	 * struct fown_struct is never outside the context of a struct file
+	 */
+	file = container_of(fown, struct file, f_owner);
+
+	/* we don't log here as rc can be overridden */
+	skp = file->f_security;
+	rc = smk_access(skp, tkp, MAY_WRITE, NULL);
+	rc = smk_bu_note("sigiotask", skp, tkp, MAY_WRITE, rc);
+	if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
+		rc = 0;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+	smk_ad_setfield_u_tsk(&ad, tsk);
+	smack_log(skp->smk_known, tkp->smk_known, MAY_WRITE, rc, &ad);
+	return rc;
+}
+
+/**
+ * smack_file_receive - Smack file receive check
+ * @file: the object
+ *
+ * Returns 0 if current has access, error code otherwise
+ */
+static int smack_file_receive(struct file *file)
+{
+	int rc;
+	int may = 0;
+	struct smk_audit_info ad;
+	struct inode *inode = file_inode(file);
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+	smk_ad_setfield_u_fs_path(&ad, file->f_path);
+	/*
+	 * This code relies on bitmasks.
+	 */
+	if (file->f_mode & FMODE_READ)
+		may = MAY_READ;
+	if (file->f_mode & FMODE_WRITE)
+		may |= MAY_WRITE;
+
+	rc = smk_curacc(smk_of_inode(inode), may, &ad);
+	rc = smk_bu_file(file, may, rc);
+	return rc;
+}
+
+/**
+ * smack_file_open - Smack dentry open processing
+ * @file: the object
+ * @cred: task credential
+ *
+ * Set the security blob in the file structure.
+ * Allow the open only if the task has read access. There are
+ * many read operations (e.g. fstat) that you can do with an
+ * fd even if you have the file open write-only.
+ *
+ * Returns 0 if current has read access, an error code otherwise
+ */
+static int smack_file_open(struct file *file, const struct cred *cred)
+{
+	struct task_smack *tsp = cred->security;
+	struct inode *inode = file_inode(file);
+	struct smk_audit_info ad;
+	int rc;
+
+	if (smack_privileged(CAP_MAC_OVERRIDE))
+		return 0;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+	smk_ad_setfield_u_fs_path(&ad, file->f_path);
+	rc = smk_access(tsp->smk_task, smk_of_inode(inode), MAY_READ, &ad);
+	rc = smk_bu_credfile(cred, file, MAY_READ, rc);
+
+	return rc;
+}
+
+/*
+ * Task hooks
+ */
+
+/**
+ * smack_cred_alloc_blank - "allocate" blank task-level security credentials
+ * @cred: the new credentials
+ * @gfp: the atomicity of any memory allocations
+ *
+ * Prepare a blank set of credentials for modification.  This must allocate all
+ * the memory the LSM module might require such that cred_transfer() can
+ * complete without error.
+ */
+static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+	struct task_smack *tsp;
+
+	tsp = new_task_smack(NULL, NULL, gfp);
+	if (tsp == NULL)
+		return -ENOMEM;
+
+	cred->security = tsp;
+
+	return 0;
+}
+
+/**
+ * smack_cred_free - "free" task-level security credentials
+ * @cred: the credentials in question
+ *
+ */
+static void smack_cred_free(struct cred *cred)
+{
+	struct task_smack *tsp = cred->security;
+	struct smack_rule *rp;
+	struct list_head *l;
+	struct list_head *n;
+
+	if (tsp == NULL)
+		return;
+	cred->security = NULL;
+
+	smk_destroy_label_list(&tsp->smk_relabel);
+
+	list_for_each_safe(l, n, &tsp->smk_rules) {
+		rp = list_entry(l, struct smack_rule, list);
+		list_del(&rp->list);
+		kfree(rp);
+	}
+	kfree(tsp);
+}
+
+/**
+ * smack_cred_prepare - prepare new set of credentials for modification
+ * @new: the new credentials
+ * @old: the original credentials
+ * @gfp: the atomicity of any memory allocations
+ *
+ * Prepare a new set of credentials for modification.
+ */
+static int smack_cred_prepare(struct cred *new, const struct cred *old,
+			      gfp_t gfp)
+{
+	struct task_smack *old_tsp = old->security;
+	struct task_smack *new_tsp;
+	int rc;
+
+	new_tsp = new_task_smack(old_tsp->smk_task, old_tsp->smk_task, gfp);
+	if (new_tsp == NULL)
+		return -ENOMEM;
+
+	rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
+	if (rc != 0)
+		return rc;
+
+	rc = smk_copy_relabel(&new_tsp->smk_relabel, &old_tsp->smk_relabel,
+				gfp);
+	if (rc != 0)
+		return rc;
+
+	new->security = new_tsp;
+	return 0;
+}
+
+/**
+ * smack_cred_transfer - Transfer the old credentials to the new credentials
+ * @new: the new credentials
+ * @old: the original credentials
+ *
+ * Fill in a set of blank credentials from another set of credentials.
+ */
+static void smack_cred_transfer(struct cred *new, const struct cred *old)
+{
+	struct task_smack *old_tsp = old->security;
+	struct task_smack *new_tsp = new->security;
+
+	new_tsp->smk_task = old_tsp->smk_task;
+	new_tsp->smk_forked = old_tsp->smk_task;
+	mutex_init(&new_tsp->smk_rules_lock);
+	INIT_LIST_HEAD(&new_tsp->smk_rules);
+
+	/* The rule list is not copied; the new creds start with an empty one. */
+}
+
+/**
+ * smack_kernel_act_as - Set the subjective context in a set of credentials
+ * @new: points to the set of credentials to be modified.
+ * @secid: specifies the security ID to be set
+ *
+ * Set the security data for a kernel service.
+ */
+static int smack_kernel_act_as(struct cred *new, u32 secid)
+{
+	struct task_smack *new_tsp = new->security;
+	struct smack_known *skp = smack_from_secid(secid);
+
+	if (skp == NULL)
+		return -EINVAL;
+
+	new_tsp->smk_task = skp;
+	return 0;
+}
+
+/**
+ * smack_kernel_create_files_as - Set the file creation label in a set of creds
+ * @new: points to the set of credentials to be modified
+ * @inode: points to the inode to use as a reference
+ *
+ * Set the file creation context in a set of credentials to the same
+ * as the objective context of the specified inode
+ */
+static int smack_kernel_create_files_as(struct cred *new,
+					struct inode *inode)
+{
+	struct inode_smack *isp = inode->i_security;
+	struct task_smack *tsp = new->security;
+
+	tsp->smk_forked = isp->smk_inode;
+	tsp->smk_task = tsp->smk_forked;
+	return 0;
+}
+
+/**
+ * smk_curacc_on_task - helper to log task related access
+ * @p: the task object
+ * @access: the access requested
+ * @caller: name of the calling function for audit
+ *
+ * Return 0 if access is permitted
+ */
+static int smk_curacc_on_task(struct task_struct *p, int access,
+				const char *caller)
+{
+	struct smk_audit_info ad;
+	struct smack_known *skp = smk_of_task_struct(p);
+	int rc;
+
+	smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
+	smk_ad_setfield_u_tsk(&ad, p);
+	rc = smk_curacc(skp, access, &ad);
+	rc = smk_bu_task(p, access, rc);
+	return rc;
+}
+
+/**
+ * smack_task_setpgid - Smack check on setting pgid
+ * @p: the task object
+ * @pgid: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
+{
+	return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_getpgid - Smack access check for getpgid
+ * @p: the object task
+ *
+ * Returns 0 if current can read the object task, error code otherwise
+ */
+static int smack_task_getpgid(struct task_struct *p)
+{
+	return smk_curacc_on_task(p, MAY_READ, __func__);
+}
+
+/**
+ * smack_task_getsid - Smack access check for getsid
+ * @p: the object task
+ *
+ * Returns 0 if current can read the object task, error code otherwise
+ */
+static int smack_task_getsid(struct task_struct *p)
+{
+	return smk_curacc_on_task(p, MAY_READ, __func__);
+}
+
+/**
+ * smack_task_getsecid - get the secid of the task
+ * @p: the object task
+ * @secid: where to put the result
+ *
+ * Sets the secid to contain a u32 version of the smack label.
+ */
+static void smack_task_getsecid(struct task_struct *p, u32 *secid)
+{
+	struct smack_known *skp = smk_of_task_struct(p);
+
+	*secid = skp->smk_secid;
+}
+
+/**
+ * smack_task_setnice - Smack check on setting nice
+ * @p: the task object
+ * @nice: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setnice(struct task_struct *p, int nice)
+{
+	return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_setioprio - Smack check on setting ioprio
+ * @p: the task object
+ * @ioprio: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setioprio(struct task_struct *p, int ioprio)
+{
+	return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_getioprio - Smack check on reading ioprio
+ * @p: the task object
+ *
+ * Return 0 if read access is permitted
+ */
+static int smack_task_getioprio(struct task_struct *p)
+{
+	return smk_curacc_on_task(p, MAY_READ, __func__);
+}
+
+/**
+ * smack_task_setscheduler - Smack check on setting scheduler
+ * @p: the task object
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setscheduler(struct task_struct *p)
+{
+	return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_getscheduler - Smack check on reading scheduler
+ * @p: the task object
+ *
+ * Return 0 if read access is permitted
+ */
+static int smack_task_getscheduler(struct task_struct *p)
+{
+	return smk_curacc_on_task(p, MAY_READ, __func__);
+}
+
+/**
+ * smack_task_movememory - Smack check on moving memory
+ * @p: the task object
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_movememory(struct task_struct *p)
+{
+	return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_kill - Smack check on signal delivery
+ * @p: the task object
+ * @info: unused
+ * @sig: unused
+ * @secid: identifies the smack to use in lieu of current's
+ *
+ * Return 0 if write access is permitted
+ *
+ * The secid behavior is an artifact of an SELinux hack
+ * in the USB code. Someday it may go away.
+ */
+static int smack_task_kill(struct task_struct *p, struct siginfo *info,
+			   int sig, u32 secid)
+{
+	struct smk_audit_info ad;
+	struct smack_known *skp;
+	struct smack_known *tkp = smk_of_task_struct(p);
+	int rc;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+	smk_ad_setfield_u_tsk(&ad, p);
+	/*
+	 * Sending a signal requires that the sender
+	 * can write the receiver.
+	 */
+	if (secid == 0) {
+		rc = smk_curacc(tkp, MAY_WRITE, &ad);
+		rc = smk_bu_task(p, MAY_WRITE, rc);
+		return rc;
+	}
+	/*
+	 * If the secid isn't 0 we're dealing with some USB IO
+	 * specific behavior. This is not clean. For one thing
+	 * we can't take privilege into account.
+	 */
+	skp = smack_from_secid(secid);
+	rc = smk_access(skp, tkp, MAY_WRITE, &ad);
+	rc = smk_bu_note("USB signal", skp, tkp, MAY_WRITE, rc);
+	return rc;
+}
+
+/**
+ * smack_task_wait - Smack access check for waiting
+ * @p: task to wait for
+ *
+ * Returns 0
+ */
+static int smack_task_wait(struct task_struct *p)
+{
+	/*
+	 * Allow the operation to succeed.
+	 * Zombies are bad.
+	 * In userless environments (e.g. phones) programs
+	 * get marked with SMACK64EXEC and even if the parent
+	 * and child shouldn't be talking the parent still
+	 * may expect to know when the child exits.
+	 */
+	return 0;
+}
+
+/**
+ * smack_task_to_inode - copy task smack into the inode blob
+ * @p: task to copy from
+ * @inode: inode to copy to
+ *
+ * Sets the smack pointer in the inode security blob
+ */
+static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
+{
+	struct inode_smack *isp = inode->i_security;
+	struct smack_known *skp = smk_of_task_struct(p);
+
+	isp->smk_inode = skp;
+}
+
+/*
+ * Socket hooks.
+ */
+
+/**
+ * smack_sk_alloc_security - Allocate a socket blob
+ * @sk: the socket
+ * @family: unused
+ * @gfp_flags: memory allocation flags
+ *
+ * Assigns the current task's Smack label to the socket's in and out pointers
+ *
+ * Returns 0 on success, -ENOMEM if there's no memory
+ */
+static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
+{
+	struct smack_known *skp = smk_of_current();
+	struct socket_smack *ssp;
+
+	ssp = kzalloc(sizeof(struct socket_smack), gfp_flags);
+	if (ssp == NULL)
+		return -ENOMEM;
+
+	ssp->smk_in = skp;
+	ssp->smk_out = skp;
+	ssp->smk_packet = NULL;
+
+	sk->sk_security = ssp;
+
+	return 0;
+}
+
+/**
+ * smack_sk_free_security - Free a socket blob
+ * @sk: the socket
+ *
+ * Clears the blob pointer
+ */
+static void smack_sk_free_security(struct sock *sk)
+{
+	kfree(sk->sk_security);
+}
+
+/**
+ * smack_ipv4host_label - check host based restrictions
+ * @sip: the object end
+ *
+ * looks for host based access restrictions
+ *
+ * This version will only be appropriate for really small sets of single label
+ * hosts.  The caller is responsible for ensuring that the RCU read lock is
+ * taken before calling this function.
+ *
+ * Returns the label of the far end or NULL if it's not special.
+ */
+static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip)
+{
+	struct smk_net4addr *snp;
+	struct in_addr *siap = &sip->sin_addr;
+
+	if (siap->s_addr == 0)
+		return NULL;
+
+	list_for_each_entry_rcu(snp, &smk_net4addr_list, list)
+		/*
+		 * we break after finding the first match because
+		 * the list is sorted from longest to shortest mask
+		 * so we have found the most specific match
+		 */
+		if (snp->smk_host.s_addr ==
+		    (siap->s_addr & snp->smk_mask.s_addr))
+			return snp->smk_label;
+
+	return NULL;
+}
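+
+/*
+ * Worked example (illustrative; entries are hypothetical): with
+ * host entries "192.168.1.0/24 DMZ" and "192.168.0.0/16 Intranet",
+ * a lookup for 192.168.1.5 returns "DMZ" because the /24 entry,
+ * having the longer mask, is encountered first; 10.0.0.1 matches
+ * neither entry and NULL is returned.
+ */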
+
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * smk_ipv6_localhost - Check for local ipv6 host address
+ * @sip: the address
+ *
+ * Returns boolean true if this is the localhost address
+ */
+static bool smk_ipv6_localhost(struct sockaddr_in6 *sip)
+{
+	__be16 *be16p = (__be16 *)&sip->sin6_addr;
+	__be32 *be32p = (__be32 *)&sip->sin6_addr;
+
+	if (be32p[0] == 0 && be32p[1] == 0 && be32p[2] == 0 && be16p[6] == 0 &&
+	    ntohs(be16p[7]) == 1)
+		return true;
+	return false;
+}
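+
+/*
+ * Worked example (illustrative): ::1 is fifteen zero bytes followed
+ * by 0x01, so be32p[0..2] are all zero, be16p[6] is zero and
+ * be16p[7] is htons(1), exactly the pattern tested above. The
+ * mapped form ::ffff:127.0.0.1 fails the be32p[2] test and is not
+ * treated as localhost here.
+ */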
+
+/**
+ * smack_ipv6host_label - check host based restrictions
+ * @sip: the object end
+ *
+ * looks for host based access restrictions
+ *
+ * This version will only be appropriate for really small sets of single label
+ * hosts.  The caller is responsible for ensuring that the RCU read lock is
+ * taken before calling this function.
+ *
+ * Returns the label of the far end or NULL if it's not special.
+ */
+static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
+{
+	struct smk_net6addr *snp;
+	struct in6_addr *sap = &sip->sin6_addr;
+	int i;
+	int found = 0;
+
+	/*
+	 * It's local. Don't look for a host label.
+	 */
+	if (smk_ipv6_localhost(sip))
+		return NULL;
+
+	list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
+		/*
+		 * we break after finding the first match because
+		 * the list is sorted from longest to shortest mask
+		 * so we have found the most specific match
+		 */
+		for (found = 1, i = 0; i < 8; i++) {
+			/*
+			 * If the label is NULL the entry has
+			 * been renounced. Ignore it.
+			 */
+			if (snp->smk_label == NULL)
+				continue;
+			if ((sap->s6_addr16[i] & snp->smk_mask.s6_addr16[i]) !=
+			    snp->smk_host.s6_addr16[i]) {
+				found = 0;
+				break;
+			}
+		}
+		if (found)
+			return snp->smk_label;
+	}
+
+	return NULL;
+}
+#endif /* CONFIG_IPV6 */
+
+/**
+ * smack_netlabel - Set the secattr on a socket
+ * @sk: the socket
+ * @labeled: socket label scheme
+ *
+ * Convert the outbound smack value (smk_out) to a
+ * secattr and attach it to the socket.
+ *
+ * Returns 0 on success or an error code
+ */
+static int smack_netlabel(struct sock *sk, int labeled)
+{
+	struct smack_known *skp;
+	struct socket_smack *ssp = sk->sk_security;
+	int rc = 0;
+
+	/*
+	 * Usually the netlabel code will handle changing the
+	 * packet labeling based on the label.
+	 * The case of a single label host is different, because
+	 * a single label host should never get a labeled packet
+	 * even though its socket label would normally be attached
+	 * to outgoing packets.
+	 */
+	local_bh_disable();
+	bh_lock_sock_nested(sk);
+
+	if (ssp->smk_out == smack_net_ambient ||
+	    labeled == SMACK_UNLABELED_SOCKET)
+		netlbl_sock_delattr(sk);
+	else {
+		skp = ssp->smk_out;
+		rc = netlbl_sock_setattr(sk, sk->sk_family, &skp->smk_netlabel);
+	}
+
+	bh_unlock_sock(sk);
+	local_bh_enable();
+
+	return rc;
+}
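+
+/*
+ * Usage sketch (illustrative): a socket whose smk_out matches
+ * smack_net_ambient, the label given to unlabeled network traffic,
+ * or one explicitly set to SMACK_UNLABELED_SOCKET, has any CIPSO
+ * attribute stripped by netlbl_sock_delattr(); any other label is
+ * attached via the secattr cached in skp->smk_netlabel.
+ */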
+
+/**
+ * smack_netlabel_send - Set the secattr on a socket and perform access checks
+ * @sk: the socket
+ * @sap: the destination address
+ *
+ * Set the correct secattr for the given socket based on the destination
+ * address and perform any outbound access checks needed.
+ *
+ * Returns 0 on success or an error code.
+ */
+static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
+{
+	struct smack_known *skp;
+	int rc;
+	int sk_lbl;
+	struct smack_known *hkp;
+	struct socket_smack *ssp = sk->sk_security;
+	struct smk_audit_info ad;
+
+	rcu_read_lock();
+	hkp = smack_ipv4host_label(sap);
+	if (hkp != NULL) {
+#ifdef CONFIG_AUDIT
+		struct lsm_network_audit net;
+
+		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+		ad.a.u.net->family = sap->sin_family;
+		ad.a.u.net->dport = sap->sin_port;
+		ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
+#endif
+		sk_lbl = SMACK_UNLABELED_SOCKET;
+		skp = ssp->smk_out;
+		rc = smk_access(skp, hkp, MAY_WRITE, &ad);
+		rc = smk_bu_note("IPv4 host check", skp, hkp, MAY_WRITE, rc);
+	} else {
+		sk_lbl = SMACK_CIPSO_SOCKET;
+		rc = 0;
+	}
+	rcu_read_unlock();
+	if (rc != 0)
+		return rc;
+
+	return smack_netlabel(sk, sk_lbl);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * smk_ipv6_check - check Smack access
+ * @subject: subject Smack label
+ * @object: object Smack label
+ * @address: address
+ * @act: the action being taken
+ *
+ * Check an IPv6 access
+ */
+static int smk_ipv6_check(struct smack_known *subject,
+				struct smack_known *object,
+				struct sockaddr_in6 *address, int act)
+{
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+#endif
+	struct smk_audit_info ad;
+	int rc;
+
+#ifdef CONFIG_AUDIT
+	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+	ad.a.u.net->family = PF_INET6;
+	ad.a.u.net->dport = ntohs(address->sin6_port);
+	if (act == SMK_RECEIVING)
+		ad.a.u.net->v6info.saddr = address->sin6_addr;
+	else
+		ad.a.u.net->v6info.daddr = address->sin6_addr;
+#endif
+	rc = smk_access(subject, object, MAY_WRITE, &ad);
+	rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc);
+	return rc;
+}
+#endif /* CONFIG_IPV6 */
+
+#ifdef SMACK_IPV6_PORT_LABELING
+/**
+ * smk_ipv6_port_label - Smack port access table management
+ * @sock: socket
+ * @address: address
+ *
+ * Create or update the port list entry
+ */
+static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
+{
+	struct sock *sk = sock->sk;
+	struct sockaddr_in6 *addr6;
+	struct socket_smack *ssp = sock->sk->sk_security;
+	struct smk_port_label *spp;
+	unsigned short port = 0;
+
+	if (address == NULL) {
+		/*
+		 * This operation is changing the Smack information
+		 * on the bound socket. Take the changes to the port
+		 * as well.
+		 */
+		list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+			if (sk != spp->smk_sock)
+				continue;
+			spp->smk_in = ssp->smk_in;
+			spp->smk_out = ssp->smk_out;
+			return;
+		}
+		/*
+		 * A NULL address is only used for updating existing
+		 * bound entries. If there isn't one, it's OK.
+		 */
+		return;
+	}
+
+	addr6 = (struct sockaddr_in6 *)address;
+	port = ntohs(addr6->sin6_port);
+	/*
+	 * This is a special case that is safely ignored.
+	 */
+	if (port == 0)
+		return;
+
+	/*
+	 * Look for an existing port list entry.
+	 * This is an indication that a port is getting reused.
+	 */
+	list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+		if (spp->smk_port != port)
+			continue;
+		spp->smk_port = port;
+		spp->smk_sock = sk;
+		spp->smk_in = ssp->smk_in;
+		spp->smk_out = ssp->smk_out;
+		return;
+	}
+
+	/*
+	 * A new port entry is required.
+	 */
+	spp = kzalloc(sizeof(*spp), GFP_KERNEL);
+	if (spp == NULL)
+		return;
+
+	spp->smk_port = port;
+	spp->smk_sock = sk;
+	spp->smk_in = ssp->smk_in;
+	spp->smk_out = ssp->smk_out;
+
+	list_add(&spp->list, &smk_ipv6_port_list);
+}
+
+/**
+ * smk_ipv6_port_check - check Smack port access
+ * @sk: socket
+ * @address: address
+ * @act: the action being taken
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
+				int act)
+{
+	struct smk_port_label *spp;
+	struct socket_smack *ssp = sk->sk_security;
+	struct smack_known *skp = NULL;
+	unsigned short port;
+	struct smack_known *object;
+
+	if (act == SMK_RECEIVING) {
+		skp = smack_ipv6host_label(address);
+		object = ssp->smk_in;
+	} else {
+		skp = ssp->smk_out;
+		object = smack_ipv6host_label(address);
+	}
+
+	/*
+	 * The other end is a single label host.
+	 */
+	if (skp != NULL && object != NULL)
+		return smk_ipv6_check(skp, object, address, act);
+	if (skp == NULL)
+		skp = smack_net_ambient;
+	if (object == NULL)
+		object = smack_net_ambient;
+
+	/*
+	 * It's remote, so port lookup does no good.
+	 */
+	if (!smk_ipv6_localhost(address))
+		return smk_ipv6_check(skp, object, address, act);
+
+	/*
+	 * It's local so the send check has to have passed.
+	 */
+	if (act == SMK_RECEIVING)
+		return 0;
+
+	port = ntohs(address->sin6_port);
+	list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+		if (spp->smk_port != port)
+			continue;
+		object = spp->smk_in;
+		if (act == SMK_CONNECTING)
+			ssp->smk_packet = spp->smk_out;
+		break;
+	}
+
+	return smk_ipv6_check(skp, object, address, act);
+}
+#endif /* SMACK_IPV6_PORT_LABELING */
+
+/**
+ * smack_inode_setsecurity - set smack xattrs
+ * @inode: the object
+ * @name: attribute name
+ * @value: attribute value
+ * @size: size of the attribute
+ * @flags: unused
+ *
+ * Sets the named attribute in the appropriate blob
+ *
+ * Returns 0 on success, or an error code
+ */
+static int smack_inode_setsecurity(struct inode *inode, const char *name,
+				   const void *value, size_t size, int flags)
+{
+	struct smack_known *skp;
+	struct inode_smack *nsp = inode->i_security;
+	struct socket_smack *ssp;
+	struct socket *sock;
+	int rc = 0;
+
+	if (value == NULL || size > SMK_LONGLABEL || size == 0)
+		return -EINVAL;
+
+	skp = smk_import_entry(value, size);
+	if (IS_ERR(skp))
+		return PTR_ERR(skp);
+
+	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+		nsp->smk_inode = skp;
+		nsp->smk_flags |= SMK_INODE_INSTANT;
+		return 0;
+	}
+	/*
+	 * The rest of the Smack xattrs are only on sockets.
+	 */
+	if (inode->i_sb->s_magic != SOCKFS_MAGIC)
+		return -EOPNOTSUPP;
+
+	sock = SOCKET_I(inode);
+	if (sock == NULL || sock->sk == NULL)
+		return -EOPNOTSUPP;
+
+	ssp = sock->sk->sk_security;
+
+	if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+		ssp->smk_in = skp;
+	else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) {
+		ssp->smk_out = skp;
+		if (sock->sk->sk_family == PF_INET) {
+			rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
+			if (rc != 0)
+				printk(KERN_WARNING
+					"Smack: \"%s\" netlbl error %d.\n",
+					__func__, -rc);
+		}
+	} else
+		return -EOPNOTSUPP;
+
+#ifdef SMACK_IPV6_PORT_LABELING
+	if (sock->sk->sk_family == PF_INET6)
+		smk_ipv6_port_label(sock, NULL);
+#endif
+
+	return 0;
+}
+
+/**
+ * smack_socket_post_create - finish socket setup
+ * @sock: the socket
+ * @family: protocol family
+ * @type: unused
+ * @protocol: unused
+ * @kern: unused
+ *
+ * Sets the netlabel information on the socket
+ *
+ * Returns 0 on success, an error code otherwise
+ */
+static int smack_socket_post_create(struct socket *sock, int family,
+				    int type, int protocol, int kern)
+{
+	struct socket_smack *ssp;
+
+	if (sock->sk == NULL)
+		return 0;
+
+	/*
+	 * Sockets created by kernel threads receive the web label.
+	 */
+	if (unlikely(current->flags & PF_KTHREAD)) {
+		ssp = sock->sk->sk_security;
+		ssp->smk_in = &smack_known_web;
+		ssp->smk_out = &smack_known_web;
+	}
+
+	if (family != PF_INET)
+		return 0;
+	/*
+	 * Set the outbound netlbl.
+	 */
+	return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
+}
+
+#ifdef SMACK_IPV6_PORT_LABELING
+/**
+ * smack_socket_bind - record port binding information.
+ * @sock: the socket
+ * @address: the port address
+ * @addrlen: size of the address
+ *
+ * Records the label bound to a port.
+ *
+ * Returns 0
+ */
+static int smack_socket_bind(struct socket *sock, struct sockaddr *address,
+				int addrlen)
+{
+	if (sock->sk != NULL && sock->sk->sk_family == PF_INET6)
+		smk_ipv6_port_label(sock, address);
+	return 0;
+}
+#endif /* SMACK_IPV6_PORT_LABELING */
+
+/**
+ * smack_socket_connect - connect access check
+ * @sock: the socket
+ * @sap: the other end
+ * @addrlen: size of sap
+ *
+ * Verifies that a connection may be possible
+ *
+ * Returns 0 on success, an error code otherwise
+ */
+static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
+				int addrlen)
+{
+	int rc = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
+#endif
+#ifdef SMACK_IPV6_SECMARK_LABELING
+	struct smack_known *rsp;
+	struct socket_smack *ssp = sock->sk->sk_security;
+#endif
+
+	if (sock->sk == NULL)
+		return 0;
+
+	switch (sock->sk->sk_family) {
+	case PF_INET:
+		if (addrlen < sizeof(struct sockaddr_in))
+			return -EINVAL;
+		rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
+		break;
+	case PF_INET6:
+		if (addrlen < sizeof(struct sockaddr_in6))
+			return -EINVAL;
+#ifdef SMACK_IPV6_SECMARK_LABELING
+		rsp = smack_ipv6host_label(sip);
+		if (rsp != NULL)
+			rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
+						SMK_CONNECTING);
+#endif
+#ifdef SMACK_IPV6_PORT_LABELING
+		rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
+#endif
+		break;
+	}
+	return rc;
+}
+
+/**
+ * smack_flags_to_may - convert S_ to MAY_ values
+ * @flags: the S_ value
+ *
+ * Returns the equivalent MAY_ value
+ */
+static int smack_flags_to_may(int flags)
+{
+	int may = 0;
+
+	if (flags & S_IRUGO)
+		may |= MAY_READ;
+	if (flags & S_IWUGO)
+		may |= MAY_WRITE;
+	if (flags & S_IXUGO)
+		may |= MAY_EXEC;
+
+	return may;
+}
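+
+/*
+ * Worked example (illustrative): an IPC flag word of 0600
+ * (S_IRUSR | S_IWUSR) intersects both S_IRUGO (0444) and S_IWUGO
+ * (0222), so this returns MAY_READ | MAY_WRITE; a flag word of 0444
+ * yields just MAY_READ.
+ */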
+
+/**
+ * smack_msg_msg_alloc_security - Set the security blob for msg_msg
+ * @msg: the object
+ *
+ * Returns 0
+ */
+static int smack_msg_msg_alloc_security(struct msg_msg *msg)
+{
+	struct smack_known *skp = smk_of_current();
+
+	msg->security = skp;
+	return 0;
+}
+
+/**
+ * smack_msg_msg_free_security - Clear the security blob for msg_msg
+ * @msg: the object
+ *
+ * Clears the blob pointer
+ */
+static void smack_msg_msg_free_security(struct msg_msg *msg)
+{
+	msg->security = NULL;
+}
+
+/**
+ * smack_of_shm - the smack pointer for the shm
+ * @shp: the object
+ *
+ * Returns a pointer to the smack value
+ */
+static struct smack_known *smack_of_shm(struct shmid_kernel *shp)
+{
+	return (struct smack_known *)shp->shm_perm.security;
+}
+
+/**
+ * smack_shm_alloc_security - Set the security blob for shm
+ * @shp: the object
+ *
+ * Returns 0
+ */
+static int smack_shm_alloc_security(struct shmid_kernel *shp)
+{
+	struct kern_ipc_perm *isp = &shp->shm_perm;
+	struct smack_known *skp = smk_of_current();
+
+	isp->security = skp;
+	return 0;
+}
+
+/**
+ * smack_shm_free_security - Clear the security blob for shm
+ * @shp: the object
+ *
+ * Clears the blob pointer
+ */
+static void smack_shm_free_security(struct shmid_kernel *shp)
+{
+	struct kern_ipc_perm *isp = &shp->shm_perm;
+
+	isp->security = NULL;
+}
+
+/**
+ * smk_curacc_shm - check if current has access on shm
+ * @shp: the object
+ * @access: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smk_curacc_shm(struct shmid_kernel *shp, int access)
+{
+	struct smack_known *ssp = smack_of_shm(shp);
+	struct smk_audit_info ad;
+	int rc;
+
+#ifdef CONFIG_AUDIT
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
+	ad.a.u.ipc_id = shp->shm_perm.id;
+#endif
+	rc = smk_curacc(ssp, access, &ad);
+	rc = smk_bu_current("shm", ssp, access, rc);
+	return rc;
+}
+
+/**
+ * smack_shm_associate - Smack access check for shm
+ * @shp: the object
+ * @shmflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_associate(struct shmid_kernel *shp, int shmflg)
+{
+	int may;
+
+	may = smack_flags_to_may(shmflg);
+	return smk_curacc_shm(shp, may);
+}
+
+/**
+ * smack_shm_shmctl - Smack access check for shm
+ * @shp: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_shmctl(struct shmid_kernel *shp, int cmd)
+{
+	int may;
+
+	switch (cmd) {
+	case IPC_STAT:
+	case SHM_STAT:
+		may = MAY_READ;
+		break;
+	case IPC_SET:
+	case SHM_LOCK:
+	case SHM_UNLOCK:
+	case IPC_RMID:
+		may = MAY_READWRITE;
+		break;
+	case IPC_INFO:
+	case SHM_INFO:
+		/*
+		 * System level information.
+		 */
+		return 0;
+	default:
+		return -EINVAL;
+	}
+	return smk_curacc_shm(shp, may);
+}
+
+/**
+ * smack_shm_shmat - Smack access for shmat
+ * @shp: the object
+ * @shmaddr: unused
+ * @shmflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr,
+			   int shmflg)
+{
+	int may;
+
+	may = smack_flags_to_may(shmflg);
+	return smk_curacc_shm(shp, may);
+}
+
+/**
+ * smack_of_sem - the smack pointer for the sem
+ * @sma: the object
+ *
+ * Returns a pointer to the smack value
+ */
+static struct smack_known *smack_of_sem(struct sem_array *sma)
+{
+	return (struct smack_known *)sma->sem_perm.security;
+}
+
+/**
+ * smack_sem_alloc_security - Set the security blob for sem
+ * @sma: the object
+ *
+ * Returns 0
+ */
+static int smack_sem_alloc_security(struct sem_array *sma)
+{
+	struct kern_ipc_perm *isp = &sma->sem_perm;
+	struct smack_known *skp = smk_of_current();
+
+	isp->security = skp;
+	return 0;
+}
+
+/**
+ * smack_sem_free_security - Clear the security blob for sem
+ * @sma: the object
+ *
+ * Clears the blob pointer
+ */
+static void smack_sem_free_security(struct sem_array *sma)
+{
+	struct kern_ipc_perm *isp = &sma->sem_perm;
+
+	isp->security = NULL;
+}
+
+/**
+ * smk_curacc_sem - check if current has access on sem
+ * @sma: the object
+ * @access: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smk_curacc_sem(struct sem_array *sma, int access)
+{
+	struct smack_known *ssp = smack_of_sem(sma);
+	struct smk_audit_info ad;
+	int rc;
+
+#ifdef CONFIG_AUDIT
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
+	ad.a.u.ipc_id = sma->sem_perm.id;
+#endif
+	rc = smk_curacc(ssp, access, &ad);
+	rc = smk_bu_current("sem", ssp, access, rc);
+	return rc;
+}
+
+/**
+ * smack_sem_associate - Smack access check for sem
+ * @sma: the object
+ * @semflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_sem_associate(struct sem_array *sma, int semflg)
+{
+	int may;
+
+	may = smack_flags_to_may(semflg);
+	return smk_curacc_sem(sma, may);
+}
+
+/**
+ * smack_sem_semctl - Smack access check for sem
+ * @sma: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_sem_semctl(struct sem_array *sma, int cmd)
+{
+	int may;
+
+	switch (cmd) {
+	case GETPID:
+	case GETNCNT:
+	case GETZCNT:
+	case GETVAL:
+	case GETALL:
+	case IPC_STAT:
+	case SEM_STAT:
+		may = MAY_READ;
+		break;
+	case SETVAL:
+	case SETALL:
+	case IPC_RMID:
+	case IPC_SET:
+		may = MAY_READWRITE;
+		break;
+	case IPC_INFO:
+	case SEM_INFO:
+		/*
+		 * System level information
+		 */
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	return smk_curacc_sem(sma, may);
+}
+
+/**
+ * smack_sem_semop - Smack checks of semaphore operations
+ * @sma: the object
+ * @sops: unused
+ * @nsops: unused
+ * @alter: unused
+ *
+ * Treated as read and write in all cases.
+ *
+ * Returns 0 if access is allowed, error code otherwise
+ */
+static int smack_sem_semop(struct sem_array *sma, struct sembuf *sops,
+			   unsigned nsops, int alter)
+{
+	return smk_curacc_sem(sma, MAY_READWRITE);
+}
+
+/**
+ * smack_msg_queue_alloc_security - Set the security blob for msg
+ * @msq: the object
+ *
+ * Returns 0
+ */
+static int smack_msg_queue_alloc_security(struct msg_queue *msq)
+{
+	struct kern_ipc_perm *kisp = &msq->q_perm;
+	struct smack_known *skp = smk_of_current();
+
+	kisp->security = skp;
+	return 0;
+}
+
+/**
+ * smack_msg_queue_free_security - Clear the security blob for msg
+ * @msq: the object
+ *
+ * Clears the blob pointer
+ */
+static void smack_msg_queue_free_security(struct msg_queue *msq)
+{
+	struct kern_ipc_perm *kisp = &msq->q_perm;
+
+	kisp->security = NULL;
+}
+
+/**
+ * smack_of_msq - the smack pointer for the msq
+ * @msq: the object
+ *
+ * Returns a pointer to the smack label entry
+ */
+static struct smack_known *smack_of_msq(struct msg_queue *msq)
+{
+	return (struct smack_known *)msq->q_perm.security;
+}
+
+/**
+ * smk_curacc_msq - helper to check if current has access on msq
+ * @msq: the msq
+ * @access: access requested
+ *
+ * Returns 0 if current has access, an error code otherwise
+ */
+static int smk_curacc_msq(struct msg_queue *msq, int access)
+{
+	struct smack_known *msp = smack_of_msq(msq);
+	struct smk_audit_info ad;
+	int rc;
+
+#ifdef CONFIG_AUDIT
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
+	ad.a.u.ipc_id = msq->q_perm.id;
+#endif
+	rc = smk_curacc(msp, access, &ad);
+	rc = smk_bu_current("msq", msp, access, rc);
+	return rc;
+}
+
+/**
+ * smack_msg_queue_associate - Smack access check for msg_queue
+ * @msq: the object
+ * @msqflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_associate(struct msg_queue *msq, int msqflg)
+{
+	int may;
+
+	may = smack_flags_to_may(msqflg);
+	return smk_curacc_msq(msq, may);
+}
+
+/**
+ * smack_msg_queue_msgctl - Smack access check for msg_queue
+ * @msq: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_msgctl(struct msg_queue *msq, int cmd)
+{
+	int may;
+
+	switch (cmd) {
+	case IPC_STAT:
+	case MSG_STAT:
+		may = MAY_READ;
+		break;
+	case IPC_SET:
+	case IPC_RMID:
+		may = MAY_READWRITE;
+		break;
+	case IPC_INFO:
+	case MSG_INFO:
+		/*
+		 * System level information
+		 */
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	return smk_curacc_msq(msq, may);
+}
+
+/**
+ * smack_msg_queue_msgsnd - Smack access check for msg_queue
+ * @msq: the object
+ * @msg: unused
+ * @msqflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
+				  int msqflg)
+{
+	int may;
+
+	may = smack_flags_to_may(msqflg);
+	return smk_curacc_msq(msq, may);
+}
+
+/**
+ * smack_msg_queue_msgrcv - Smack access check for msg_queue
+ * @msq: the object
+ * @msg: unused
+ * @target: unused
+ * @type: unused
+ * @mode: unused
+ *
+ * Returns 0 if current has read and write access, error code otherwise
+ */
+static int smack_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
+			struct task_struct *target, long type, int mode)
+{
+	return smk_curacc_msq(msq, MAY_READWRITE);
+}
+
+/**
+ * smack_ipc_permission - Smack access for ipc_permission()
+ * @ipp: the object permissions
+ * @flag: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
+{
+	struct smack_known *iskp = ipp->security;
+	int may = smack_flags_to_may(flag);
+	struct smk_audit_info ad;
+	int rc;
+
+#ifdef CONFIG_AUDIT
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
+	ad.a.u.ipc_id = ipp->id;
+#endif
+	rc = smk_curacc(iskp, may, &ad);
+	rc = smk_bu_current("svipc", iskp, may, rc);
+	return rc;
+}
+
+/**
+ * smack_ipc_getsecid - Extract smack security id
+ * @ipp: the object permissions
+ * @secid: where result will be saved
+ */
+static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid)
+{
+	struct smack_known *iskp = ipp->security;
+
+	*secid = iskp->smk_secid;
+}
+
+/**
+ * smack_d_instantiate - Make sure the blob is correct on an inode
+ * @opt_dentry: dentry where inode will be attached
+ * @inode: the object
+ *
+ * Set the inode's security blob if it hasn't been done already.
+ */
+static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
+{
+	struct super_block *sbp;
+	struct superblock_smack *sbsp;
+	struct inode_smack *isp;
+	struct smack_known *skp;
+	struct smack_known *ckp = smk_of_current();
+	struct smack_known *final;
+	char trattr[TRANS_TRUE_SIZE];
+	int transflag = 0;
+	int rc;
+	struct dentry *dp;
+
+	if (inode == NULL)
+		return;
+
+	isp = inode->i_security;
+
+	mutex_lock(&isp->smk_lock);
+	/*
+	 * If the inode is already instantiated
+	 * take the quick way out
+	 */
+	if (isp->smk_flags & SMK_INODE_INSTANT)
+		goto unlockandout;
+
+	sbp = inode->i_sb;
+	sbsp = sbp->s_security;
+	/*
+	 * We're going to use the superblock default label
+	 * if there's no label on the file.
+	 */
+	final = sbsp->smk_default;
+
+	/*
+	 * If this is the root inode the superblock
+	 * may be in the process of initialization.
+	 * If that is the case use the root value out
+	 * of the superblock.
+	 */
+	if (opt_dentry->d_parent == opt_dentry) {
+		switch (sbp->s_magic) {
+		case CGROUP_SUPER_MAGIC:
+			/*
+			 * The cgroup filesystem is never mounted,
+			 * so there's no opportunity to set the mount
+			 * options.
+			 */
+			sbsp->smk_root = &smack_known_star;
+			sbsp->smk_default = &smack_known_star;
+			isp->smk_inode = sbsp->smk_root;
+			break;
+		case TMPFS_MAGIC:
+			/*
+			 * What about shmem/tmpfs anonymous files with dentry
+			 * obtained from d_alloc_pseudo()?
+			 */
+			isp->smk_inode = smk_of_current();
+			break;
+		case PIPEFS_MAGIC:
+			isp->smk_inode = smk_of_current();
+			break;
+		default:
+			isp->smk_inode = sbsp->smk_root;
+			break;
+		}
+		isp->smk_flags |= SMK_INODE_INSTANT;
+		goto unlockandout;
+	}
+
+	/*
+	 * This is pretty hackish.
+	 * Casey says that we shouldn't have to do
+	 * file system specific code, but it does help
+	 * with keeping it simple.
+	 */
+	switch (sbp->s_magic) {
+	case SMACK_MAGIC:
+	case PIPEFS_MAGIC:
+	case SOCKFS_MAGIC:
+	case CGROUP_SUPER_MAGIC:
+		/*
+		 * Casey says that it's a little embarrassing
+		 * that the smack file system doesn't do
+		 * extended attributes.
+		 *
+		 * Casey says pipes are easy (?)
+		 *
+		 * Socket access is controlled by the socket
+		 * structures associated with the task involved.
+		 *
+		 * Cgroupfs is special
+		 */
+		final = &smack_known_star;
+		break;
+	case DEVPTS_SUPER_MAGIC:
+		/*
+		 * devpts seems content with the label of the task.
+		 * Programs that change smack have to treat the
+		 * pty with respect.
+		 */
+		final = ckp;
+		break;
+	case PROC_SUPER_MAGIC:
+		/*
+		 * Casey says procfs appears not to care.
+		 * The superblock default suffices.
+		 */
+		break;
+	case TMPFS_MAGIC:
+		/*
+		 * Device labels should come from the filesystem,
+		 * but watch out, because they're volatile,
+		 * getting recreated on every reboot.
+		 */
+		final = &smack_known_star;
+		/*
+		 * No break.
+		 *
+		 * If a smack value has been set we want to use it,
+		 * but since tmpfs isn't giving us the opportunity
+		 * to set mount options simulate setting the
+		 * superblock default.
+		 */
+	default:
+		/*
+		 * This isn't an understood special case.
+		 * Get the value from the xattr.
+		 */
+
+		/*
+		 * UNIX domain sockets use lower level socket data.
+		 */
+		if (S_ISSOCK(inode->i_mode)) {
+			final = &smack_known_star;
+			break;
+		}
+		/*
+		 * No xattr support means, alas, no SMACK label.
+		 * Use the default applied above.
+		 * It would be curious if the label of the task
+		 * does not match that assigned.
+		 */
+		if (inode->i_op->getxattr == NULL)
+			break;
+		/*
+		 * Get the dentry for xattr.
+		 */
+		dp = dget(opt_dentry);
+		skp = smk_fetch(XATTR_NAME_SMACK, inode, dp);
+		if (!IS_ERR_OR_NULL(skp))
+			final = skp;
+
+		/*
+		 * Transmuting directory
+		 */
+		if (S_ISDIR(inode->i_mode)) {
+			/*
+			 * If this is a new directory and the label was
+			 * transmuted when the inode was initialized
+			 * set the transmute attribute on the directory
+			 * and mark the inode.
+			 *
+			 * If there is a transmute attribute on the
+			 * directory mark the inode.
+			 */
+			if (isp->smk_flags & SMK_INODE_CHANGED) {
+				isp->smk_flags &= ~SMK_INODE_CHANGED;
+				rc = inode->i_op->setxattr(dp,
+					XATTR_NAME_SMACKTRANSMUTE,
+					TRANS_TRUE, TRANS_TRUE_SIZE,
+					0);
+			} else {
+				rc = inode->i_op->getxattr(dp,
+					XATTR_NAME_SMACKTRANSMUTE, trattr,
+					TRANS_TRUE_SIZE);
+				if (rc >= 0 && strncmp(trattr, TRANS_TRUE,
+						       TRANS_TRUE_SIZE) != 0)
+					rc = -EINVAL;
+			}
+			if (rc >= 0)
+				transflag = SMK_INODE_TRANSMUTE;
+		}
+		/*
+		 * Don't let the exec or mmap label be "*" or "@".
+		 */
+		skp = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp);
+		if (IS_ERR(skp) || skp == &smack_known_star ||
+		    skp == &smack_known_web)
+			skp = NULL;
+		isp->smk_task = skp;
+
+		skp = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp);
+		if (IS_ERR(skp) || skp == &smack_known_star ||
+		    skp == &smack_known_web)
+			skp = NULL;
+		isp->smk_mmap = skp;
+
+		dput(dp);
+		break;
+	}
+
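+	/*
+	 * If no label was found anywhere use the label of the
+	 * current task.
+	 */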
+	if (final == NULL)
+		isp->smk_inode = ckp;
+	else
+		isp->smk_inode = final;
+
+	isp->smk_flags |= (SMK_INODE_INSTANT | transflag);
+
+unlockandout:
+	mutex_unlock(&isp->smk_lock);
+	return;
+}
+
+/**
+ * smack_getprocattr - Smack process attribute access
+ * @p: the object task
+ * @name: the name of the attribute in /proc/.../attr
+ * @value: where to put the result
+ *
+ * Places a copy of the task Smack into value
+ *
+ * Returns the length of the smack label or an error code
+ */
+static int smack_getprocattr(struct task_struct *p, char *name, char **value)
+{
+	struct smack_known *skp = smk_of_task_struct(p);
+	char *cp;
+	int slen;
+
+	if (strcmp(name, "current") != 0)
+		return -EINVAL;
+
+	cp = kstrdup(skp->smk_known, GFP_KERNEL);
+	if (cp == NULL)
+		return -ENOMEM;
+
+	slen = strlen(cp);
+	*value = cp;
+	return slen;
+}
+
+/**
+ * smack_setprocattr - Smack process attribute setting
+ * @p: the object task
+ * @name: the name of the attribute in /proc/.../attr
+ * @value: the value to set
+ * @size: the size of the value
+ *
+ * Sets the Smack value of the task. Only setting self
+ * is permitted and only with privilege
+ *
+ * Returns the length of the smack label or an error code
+ */
+static int smack_setprocattr(struct task_struct *p, char *name,
+			     void *value, size_t size)
+{
+	struct task_smack *tsp = current_security();
+	struct cred *new;
+	struct smack_known *skp;
+	struct smack_known_list_elem *sklep;
+	int rc;
+
+	/*
+	 * Changing another process' Smack value is too dangerous
+	 * and supports no sane use case.
+	 */
+	if (p != current)
+		return -EPERM;
+
+	if (!smack_privileged(CAP_MAC_ADMIN) && list_empty(&tsp->smk_relabel))
+		return -EPERM;
+
+	if (value == NULL || size == 0 || size >= SMK_LONGLABEL)
+		return -EINVAL;
+
+	if (strcmp(name, "current") != 0)
+		return -EINVAL;
+
+	skp = smk_import_entry(value, size);
+	if (IS_ERR(skp))
+		return PTR_ERR(skp);
+
+	/*
+	 * No process is ever allowed the web ("@") label.
+	 */
+	if (skp == &smack_known_web)
+		return -EPERM;
+
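+	/*
+	 * Without CAP_MAC_ADMIN the only permitted transitions are to
+	 * labels on the task's smk_relabel list, checked below.
+	 */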
+	if (!smack_privileged(CAP_MAC_ADMIN)) {
+		rc = -EPERM;
+		list_for_each_entry(sklep, &tsp->smk_relabel, list)
+			if (sklep->smk_label == skp) {
+				rc = 0;
+				break;
+			}
+		if (rc)
+			return rc;
+	}
+
+	new = prepare_creds();
+	if (new == NULL)
+		return -ENOMEM;
+
+	tsp = new->security;
+	tsp->smk_task = skp;
+	/*
+	 * process can change its label only once
+	 */
+	smk_destroy_label_list(&tsp->smk_relabel);
+
+	commit_creds(new);
+	return size;
+}
+
+/**
+ * smack_unix_stream_connect - Smack access on UDS
+ * @sock: one sock
+ * @other: the other sock
+ * @newsk: unused
+ *
+ * Return 0 if a subject with the smack of sock could access
+ * an object with the smack of other, otherwise an error code
+ */
+static int smack_unix_stream_connect(struct sock *sock,
+				     struct sock *other, struct sock *newsk)
+{
+	struct smack_known *skp;
+	struct smack_known *okp;
+	struct socket_smack *ssp = sock->sk_security;
+	struct socket_smack *osp = other->sk_security;
+	struct socket_smack *nsp = newsk->sk_security;
+	struct smk_audit_info ad;
+	int rc = 0;
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+#endif
+
+	if (!smack_privileged(CAP_MAC_OVERRIDE)) {
+		skp = ssp->smk_out;
+		okp = osp->smk_in;
+#ifdef CONFIG_AUDIT
+		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+		smk_ad_setfield_u_net_sk(&ad, other);
+#endif
+		rc = smk_access(skp, okp, MAY_WRITE, &ad);
+		rc = smk_bu_note("UDS connect", skp, okp, MAY_WRITE, rc);
+		if (rc == 0) {
+			okp = osp->smk_out;
+			skp = ssp->smk_in;
+			rc = smk_access(okp, skp, MAY_WRITE, &ad);
+			rc = smk_bu_note("UDS connect", okp, skp,
+						MAY_WRITE, rc);
+		}
+	}
+
+	/*
+	 * Cross reference the peer labels for SO_PEERSEC.
+	 */
+	if (rc == 0) {
+		nsp->smk_packet = ssp->smk_out;
+		ssp->smk_packet = osp->smk_out;
+	}
+
+	return rc;
+}
+
+/**
+ * smack_unix_may_send - Smack access on UDS
+ * @sock: one socket
+ * @other: the other socket
+ *
+ * Return 0 if a subject with the smack of sock could access
+ * an object with the smack of other, otherwise an error code
+ */
+static int smack_unix_may_send(struct socket *sock, struct socket *other)
+{
+	struct socket_smack *ssp = sock->sk->sk_security;
+	struct socket_smack *osp = other->sk->sk_security;
+	struct smk_audit_info ad;
+	int rc;
+
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+
+	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+	smk_ad_setfield_u_net_sk(&ad, other->sk);
+#endif
+
+	if (smack_privileged(CAP_MAC_OVERRIDE))
+		return 0;
+
+	rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+	rc = smk_bu_note("UDS send", ssp->smk_out, osp->smk_in, MAY_WRITE, rc);
+	return rc;
+}
+
+/**
+ * smack_socket_sendmsg - Smack check based on destination host
+ * @sock: the socket
+ * @msg: the message
+ * @size: the size of the message
+ *
+ * Return 0 if the current subject can write to the destination host.
+ * For IPv4 the check only applies when the destination is a single label host.
+ * For IPv6 this is a check against the label of the port.
+ */
+static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+				int size)
+{
+	struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
+#endif
+#ifdef SMACK_IPV6_SECMARK_LABELING
+	struct socket_smack *ssp = sock->sk->sk_security;
+	struct smack_known *rsp;
+#endif
+	int rc = 0;
+
+	/*
+	 * Perfectly reasonable for this to be NULL; a connected
+	 * socket had its destination checked at connect time.
+	 */
+	if (sip == NULL)
+		return 0;
+
+	switch (sip->sin_family) {
+	case AF_INET:
+		rc = smack_netlabel_send(sock->sk, sip);
+		break;
+	case AF_INET6:
+#ifdef SMACK_IPV6_SECMARK_LABELING
+		rsp = smack_ipv6host_label(sap);
+		if (rsp != NULL)
+			rc = smk_ipv6_check(ssp->smk_out, rsp, sap,
+						SMK_CONNECTING);
+#endif
+#ifdef SMACK_IPV6_PORT_LABELING
+		rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING);
+#endif
+		break;
+	}
+	return rc;
+}
+
+/**
+ * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack
+ * @sap: netlabel secattr
+ * @ssp: socket security information
+ *
+ * Returns a pointer to a Smack label entry found on the label list.
+ */
+static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
+						struct socket_smack *ssp)
+{
+	struct smack_known *skp;
+	int found = 0;
+	int acat;
+	int kcat;
+
+	if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) {
+		/*
+		 * Looks like a CIPSO packet.
+		 * If there are flags but no level netlabel isn't
+		 * behaving the way we expect it to.
+		 *
+		 * Look it up in the label table.  If no entry matches
+		 * the packet is treated as unlabeled ("*"), or as web
+		 * ("@") when the receiving socket is star labeled.
+		 */
+		rcu_read_lock();
+		list_for_each_entry(skp, &smack_known_list, list) {
+			if (sap->attr.mls.lvl != skp->smk_netlabel.attr.mls.lvl)
+				continue;
+			/*
+			 * Compare the catsets. Use the netlbl APIs.
+			 */
+			if ((sap->flags & NETLBL_SECATTR_MLS_CAT) == 0) {
+				if ((skp->smk_netlabel.flags &
+				     NETLBL_SECATTR_MLS_CAT) == 0)
+					found = 1;
+				break;
+			}
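+			/*
+			 * Walk the two category maps in lockstep.  The
+			 * loop exits when the walks disagree or both
+			 * run out; equal (negative) results mean the
+			 * category sets match.
+			 */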
+			for (acat = -1, kcat = -1; acat == kcat; ) {
+				acat = netlbl_catmap_walk(sap->attr.mls.cat,
+							  acat + 1);
+				kcat = netlbl_catmap_walk(
+					skp->smk_netlabel.attr.mls.cat,
+					kcat + 1);
+				if (acat < 0 || kcat < 0)
+					break;
+			}
+			if (acat == kcat) {
+				found = 1;
+				break;
+			}
+		}
+		rcu_read_unlock();
+
+		if (found)
+			return skp;
+
+		if (ssp != NULL && ssp->smk_in == &smack_known_star)
+			return &smack_known_web;
+		return &smack_known_star;
+	}
+	if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
+		/*
+		 * Looks like a fallback, which gives us a secid.
+		 */
+		skp = smack_from_secid(sap->attr.secid);
+		/*
+		 * This has got to be a bug because it is
+		 * impossible to specify a fallback without
+		 * specifying the label, which will ensure
+		 * it has a secid, and the only way to get a
+		 * secid is from a fallback.
+		 */
+		BUG_ON(skp == NULL);
+		return skp;
+	}
+	/*
+	 * Without guidance regarding the smack value
+	 * for the packet fall back on the network
+	 * ambient value.
+	 */
+	return smack_net_ambient;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
+{
+	u8 nexthdr;
+	int offset;
+	int proto = -EINVAL;
+	struct ipv6hdr _ipv6h;
+	struct ipv6hdr *ip6;
+	__be16 frag_off;
+	struct tcphdr _tcph, *th;
+	struct udphdr _udph, *uh;
+	struct dccp_hdr _dccph, *dh;
+
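+	/* Default to port 0 in case no transport header can be read. */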
+	sip->sin6_port = 0;
+
+	offset = skb_network_offset(skb);
+	ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+	if (ip6 == NULL)
+		return -EINVAL;
+	sip->sin6_addr = ip6->saddr;
+
+	nexthdr = ip6->nexthdr;
+	offset += sizeof(_ipv6h);
+	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+	if (offset < 0)
+		return -EINVAL;
+
+	proto = nexthdr;
+	switch (proto) {
+	case IPPROTO_TCP:
+		th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+		if (th != NULL)
+			sip->sin6_port = th->source;
+		break;
+	case IPPROTO_UDP:
+		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+		if (uh != NULL)
+			sip->sin6_port = uh->source;
+		break;
+	case IPPROTO_DCCP:
+		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+		if (dh != NULL)
+			sip->sin6_port = dh->dccph_sport;
+		break;
+	}
+	return proto;
+}
+#endif /* CONFIG_IPV6 */
+
+/**
+ * smack_socket_sock_rcv_skb - Smack packet delivery access check
+ * @sk: socket
+ * @skb: packet
+ *
+ * Returns 0 if the packet should be delivered, an error code otherwise
+ */
+static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct netlbl_lsm_secattr secattr;
+	struct socket_smack *ssp = sk->sk_security;
+	struct smack_known *skp = NULL;
+	int rc = 0;
+	struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+	struct sockaddr_in6 sadd;
+	int proto;
+#endif /* CONFIG_IPV6 */
+
+	switch (sk->sk_family) {
+	case PF_INET:
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+		/*
+		 * If there is a secmark use it rather than the CIPSO label.
+		 * If there is no secmark fall back to CIPSO.
+		 * The secmark is assumed to reflect policy better.
+		 */
+		if (skb && skb->secmark != 0) {
+			skp = smack_from_secid(skb->secmark);
+			goto access_check;
+		}
+#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
+		/*
+		 * Translate what netlabel gave us.
+		 */
+		netlbl_secattr_init(&secattr);
+
+		rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
+		if (rc == 0)
+			skp = smack_from_secattr(&secattr, ssp);
+		else
+			skp = smack_net_ambient;
+
+		netlbl_secattr_destroy(&secattr);
+
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+access_check:
+#endif
+#ifdef CONFIG_AUDIT
+		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+		ad.a.u.net->family = sk->sk_family;
+		ad.a.u.net->netif = skb->skb_iif;
+		ipv4_skb_to_auditdata(skb, &ad.a, NULL);
+#endif
+		/*
+		 * Receiving a packet requires that the other end
+		 * be able to write here. Read access is not required.
+		 * This is the simplest possible security model
+		 * for networking.
+		 */
+		rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+		rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in,
+					MAY_WRITE, rc);
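+		/*
+		 * On denial give netlabel the chance to send any
+		 * protocol level error notification that applies.
+		 */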
+		if (rc != 0)
+			netlbl_skbuff_err(skb, rc, 0);
+		break;
+#if IS_ENABLED(CONFIG_IPV6)
+	case PF_INET6:
+		proto = smk_skb_to_addr_ipv6(skb, &sadd);
+		if (proto != IPPROTO_UDP && proto != IPPROTO_TCP)
+			break;
+#ifdef SMACK_IPV6_SECMARK_LABELING
+		if (skb && skb->secmark != 0)
+			skp = smack_from_secid(skb->secmark);
+		else
+			skp = smack_ipv6host_label(&sadd);
+		if (skp == NULL)
+			skp = smack_net_ambient;
+#ifdef CONFIG_AUDIT
+		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+		ad.a.u.net->family = sk->sk_family;
+		ad.a.u.net->netif = skb->skb_iif;
+		ipv6_skb_to_auditdata(skb, &ad.a, NULL);
+#endif /* CONFIG_AUDIT */
+		rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+		rc = smk_bu_note("IPv6 delivery", skp, ssp->smk_in,
+					MAY_WRITE, rc);
+#endif /* SMACK_IPV6_SECMARK_LABELING */
+#ifdef SMACK_IPV6_PORT_LABELING
+		rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
+#endif /* SMACK_IPV6_PORT_LABELING */
+		break;
+#endif /* CONFIG_IPV6 */
+	}
+
+	return rc;
+}
+
+/**
+ * smack_socket_getpeersec_stream - pull in packet label
+ * @sock: the socket
+ * @optval: user's destination
+ * @optlen: where to write the length of the result
+ * @len: the maximum number of bytes available at @optval
+ *
+ * returns zero on success, an error code otherwise
+ */
+static int smack_socket_getpeersec_stream(struct socket *sock,
+					  char __user *optval,
+					  int __user *optlen, unsigned len)
+{
+	struct socket_smack *ssp;
+	char *rcp = "";
+	int slen = 1;
+	int rc = 0;
+
+	ssp = sock->sk->sk_security;
+	if (ssp->smk_packet != NULL) {
+		rcp = ssp->smk_packet->smk_known;
+		slen = strlen(rcp) + 1;
+	}
+
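+	/* slen includes the terminating NUL; an unlabeled peer reports "". */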
+	if (slen > len)
+		rc = -ERANGE;
+	else if (copy_to_user(optval, rcp, slen) != 0)
+		rc = -EFAULT;
+
+	if (put_user(slen, optlen) != 0)
+		rc = -EFAULT;
+
+	return rc;
+}
+
+/**
+ * smack_socket_getpeersec_dgram - pull in packet label
+ * @sock: the peer socket
+ * @skb: packet data
+ * @secid: pointer to where to put the secid of the packet
+ *
+ * Fills @secid with the secid of the packet's or peer's Smack label.
+ *
+ * Returns 0 on success, -EINVAL if no label can be determined
+ */
+static int smack_socket_getpeersec_dgram(struct socket *sock,
+					 struct sk_buff *skb, u32 *secid)
+{
+	struct netlbl_lsm_secattr secattr;
+	struct socket_smack *ssp = NULL;
+	struct smack_known *skp;
+	int family = PF_UNSPEC;
+	u32 s = 0;	/* 0 is the invalid secid */
+	int rc;
+
+	if (skb != NULL) {
+		if (skb->protocol == htons(ETH_P_IP))
+			family = PF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+		else if (skb->protocol == htons(ETH_P_IPV6))
+			family = PF_INET6;
+#endif /* CONFIG_IPV6 */
+	}
+	if (family == PF_UNSPEC && sock != NULL)
+		family = sock->sk->sk_family;
+
+	switch (family) {
+	case PF_UNIX:
+		ssp = sock->sk->sk_security;
+		s = ssp->smk_out->smk_secid;
+		break;
+	case PF_INET:
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+		s = skb->secmark;
+		if (s != 0)
+			break;
+#endif
+		/*
+		 * Translate what netlabel gave us.
+		 */
+		if (sock != NULL && sock->sk != NULL)
+			ssp = sock->sk->sk_security;
+		netlbl_secattr_init(&secattr);
+		rc = netlbl_skbuff_getattr(skb, family, &secattr);
+		if (rc == 0) {
+			skp = smack_from_secattr(&secattr, ssp);
+			s = skp->smk_secid;
+		}
+		netlbl_secattr_destroy(&secattr);
+		break;
+	case PF_INET6:
+#ifdef SMACK_IPV6_SECMARK_LABELING
+		s = skb->secmark;
+#endif
+		break;
+	}
+	*secid = s;
+	if (s == 0)
+		return -EINVAL;
+	return 0;
+}
+
+/**
+ * smack_sock_graft - Initialize a newly created socket with an existing sock
+ * @sk: child sock
+ * @parent: parent socket
+ *
+ * Set the smk_{in,out} state of an existing sock based on the process that
+ * is creating the new socket.
+ */
+static void smack_sock_graft(struct sock *sk, struct socket *parent)
+{
+	struct socket_smack *ssp;
+	struct smack_known *skp = smk_of_current();
+
+	if (sk == NULL ||
+	    (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
+		return;
+
+	ssp = sk->sk_security;
+	ssp->smk_in = skp;
+	ssp->smk_out = skp;
+	/* ssp->smk_packet is already set in smack_inet_csk_clone() */
+}
+
+/**
+ * smack_inet_conn_request - Smack access check on connect
+ * @sk: socket involved
+ * @skb: packet
+ * @req: unused
+ *
+ * Returns 0 if a task with the packet label could write to
+ * the socket, otherwise an error code
+ */
+static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+				   struct request_sock *req)
+{
+	u16 family = sk->sk_family;
+	struct smack_known *skp;
+	struct socket_smack *ssp = sk->sk_security;
+	struct netlbl_lsm_secattr secattr;
+	struct sockaddr_in addr;
+	struct iphdr *hdr;
+	struct smack_known *hskp;
+	int rc;
+	struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (family == PF_INET6) {
+		/*
+		 * Handle mapped IPv4 packets arriving
+		 * via IPv6 sockets. Don't set up netlabel
+		 * processing on IPv6.
+		 */
+		if (skb->protocol == htons(ETH_P_IP))
+			family = PF_INET;
+		else
+			return 0;
+	}
+#endif /* CONFIG_IPV6 */
+
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+	/*
+	 * If there is a secmark use it rather than the CIPSO label.
+	 * If there is no secmark fall back to CIPSO.
+	 * The secmark is assumed to reflect policy better.
+	 */
+	if (skb && skb->secmark != 0) {
+		skp = smack_from_secid(skb->secmark);
+		goto access_check;
+	}
+#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
+
+	netlbl_secattr_init(&secattr);
+	rc = netlbl_skbuff_getattr(skb, family, &secattr);
+	if (rc == 0)
+		skp = smack_from_secattr(&secattr, ssp);
+	else
+		skp = &smack_known_huh;
+	netlbl_secattr_destroy(&secattr);
+
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+access_check:
+#endif
+
+#ifdef CONFIG_AUDIT
+	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+	ad.a.u.net->family = family;
+	ad.a.u.net->netif = skb->skb_iif;
+	ipv4_skb_to_auditdata(skb, &ad.a, NULL);
+#endif
+	/*
+	 * Receiving a packet requires that the other end be able to write
+	 * here. Read access is not required.
+	 */
+	rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+	rc = smk_bu_note("IPv4 connect", skp, ssp->smk_in, MAY_WRITE, rc);
+	if (rc != 0)
+		return rc;
+
+	/*
+	 * Save the peer's label in the request_sock so we can later setup
+	 * smk_packet in the child socket so that SO_PEERSEC can report it.
+	 */
+	req->peer_secid = skp->smk_secid;
+
+	/*
+	 * We need to decide if we want to label the incoming connection here
+	 * if we do we only need to label the request_sock and the stack will
+	 * propagate the wire-label to the sock when it is created.
+	 */
+	hdr = ip_hdr(skb);
+	addr.sin_addr.s_addr = hdr->saddr;
+	rcu_read_lock();
+	hskp = smack_ipv4host_label(&addr);
+	rcu_read_unlock();
+
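+	/*
+	 * Single label hosts get unlabeled replies; everything else
+	 * is labeled with the peer's CIPSO attributes.
+	 */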
+	if (hskp == NULL)
+		rc = netlbl_req_setattr(req, &skp->smk_netlabel);
+	else
+		netlbl_req_delattr(req);
+
+	return rc;
+}
+
+/**
+ * smack_inet_csk_clone - Copy the connection information to the new socket
+ * @sk: the new socket
+ * @req: the connection's request_sock
+ *
+ * Transfer the connection's peer label to the newly created socket.
+ */
+static void smack_inet_csk_clone(struct sock *sk,
+				 const struct request_sock *req)
+{
+	struct socket_smack *ssp = sk->sk_security;
+	struct smack_known *skp;
+
+	if (req->peer_secid != 0) {
+		skp = smack_from_secid(req->peer_secid);
+		ssp->smk_packet = skp;
+	} else
+		ssp->smk_packet = NULL;
+}
+
+/*
+ * Key management security hooks
+ *
+ * Casey has not tested key support very heavily.
+ * The permission check is most likely too restrictive.
+ * If you care about keys please have a look.
+ */
+#ifdef CONFIG_KEYS
+
+/**
+ * smack_key_alloc - Set the key security blob
+ * @key: object
+ * @cred: the credentials to use
+ * @flags: unused
+ *
+ * No allocation required
+ *
+ * Returns 0
+ */
+static int smack_key_alloc(struct key *key, const struct cred *cred,
+			   unsigned long flags)
+{
+	struct smack_known *skp = smk_of_task(cred->security);
+
+	key->security = skp;
+	return 0;
+}
+
+/**
+ * smack_key_free - Clear the key security blob
+ * @key: the object
+ *
+ * Clear the blob pointer
+ */
+static void smack_key_free(struct key *key)
+{
+	key->security = NULL;
+}
+
+/**
+ * smack_key_permission - Smack access on a key
+ * @key_ref: gets to the object
+ * @cred: the credentials to use
+ * @perm: requested key permissions
+ *
+ * Return 0 if the task has the requested access to the key,
+ * an error code otherwise
+ */
+static int smack_key_permission(key_ref_t key_ref,
+				const struct cred *cred, unsigned perm)
+{
+	struct key *keyp;
+	struct smk_audit_info ad;
+	struct smack_known *tkp = smk_of_task(cred->security);
+	int request = 0;
+	int rc;
+
+	keyp = key_ref_to_ptr(key_ref);
+	if (keyp == NULL)
+		return -EINVAL;
+	/*
+	 * If the key hasn't been initialized give it access so that
+	 * it may do so.
+	 */
+	if (keyp->security == NULL)
+		return 0;
+	/*
+	 * This should not occur
+	 */
+	if (tkp == NULL)
+		return -EACCES;
+#ifdef CONFIG_AUDIT
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
+	ad.a.u.key_struct.key = keyp->serial;
+	ad.a.u.key_struct.key_desc = keyp->description;
+#endif
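+	/*
+	 * Note that the second assignment overwrites the first, so a
+	 * request carrying both read and write style permissions is
+	 * checked for MAY_WRITE only.
+	 */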
+	if (perm & KEY_NEED_READ)
+		request = MAY_READ;
+	if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
+		request = MAY_WRITE;
+	rc = smk_access(tkp, keyp->security, request, &ad);
+	rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
+	return rc;
+}
+
+/**
+ * smack_key_getsecurity - Smack label tagging the key
+ * @key: points to the key to be queried
+ * @_buffer: points to a pointer that should be set to point to the
+ *           resulting string, or NULL if there is no label
+ *
+ * Returns the length of the string (including the terminating NUL) or
+ * a negative error code.  May also return 0 (and a NULL buffer pointer)
+ * if there is no label.
+ */
+static int smack_key_getsecurity(struct key *key, char **_buffer)
+{
+	struct smack_known *skp = key->security;
+	size_t length;
+	char *copy;
+
+	if (key->security == NULL) {
+		*_buffer = NULL;
+		return 0;
+	}
+
+	copy = kstrdup(skp->smk_known, GFP_KERNEL);
+	if (copy == NULL)
+		return -ENOMEM;
+	length = strlen(copy) + 1;
+
+	*_buffer = copy;
+	return length;
+}
+
+#endif /* CONFIG_KEYS */
+
+/*
+ * Smack Audit hooks
+ *
+ * Audit requires a unique representation of each Smack specific
+ * rule. This unique representation is used to distinguish the
+ * object to be audited from remaining kernel objects and also
+ * works as a glue between the audit hooks.
+ *
+ * Since repository entries are added but never deleted, we'll use
+ * the smack_known label address related to the given audit rule as
+ * the needed unique representation. This also better fits the smack
+ * model where nearly everything is a label.
+ */
+#ifdef CONFIG_AUDIT
+
+/**
+ * smack_audit_rule_init - Initialize a smack audit rule
+ * @field: audit rule fields given from user-space (audit.h)
+ * @op: required testing operator (=, !=, >, <, ...)
+ * @rulestr: smack label to be audited
+ * @vrule: pointer to save our own audit rule representation
+ *
+ * Prepare to audit cases where (@field @op @rulestr) is true.
+ * The label to be audited is created if necessary.
+ */
+static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+{
+	struct smack_known *skp;
+	char **rule = (char **)vrule;
+	*rule = NULL;
+
+	if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
+		return -EINVAL;
+
+	if (op != Audit_equal && op != Audit_not_equal)
+		return -EINVAL;
+
+	skp = smk_import_entry(rulestr, 0);
+	if (IS_ERR(skp))
+		return PTR_ERR(skp);
+
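+	/*
+	 * Store the canonical label string.  Entries are never freed,
+	 * so smack_audit_rule_match() can compare pointers directly.
+	 */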
+	*rule = skp->smk_known;
+
+	return 0;
+}
+
+/**
+ * smack_audit_rule_known - Distinguish Smack audit rules
+ * @krule: rule of interest, in Audit kernel representation format
+ *
+ * This is used to filter Smack rules from remaining Audit ones.
+ * If it's proved that this rule belongs to us, the
+ * audit_rule_match hook will be called to do the final judgement.
+ */
+static int smack_audit_rule_known(struct audit_krule *krule)
+{
+	struct audit_field *f;
+	int i;
+
+	for (i = 0; i < krule->field_count; i++) {
+		f = &krule->fields[i];
+
+		if (f->type == AUDIT_SUBJ_USER || f->type == AUDIT_OBJ_USER)
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * smack_audit_rule_match - Audit given object?
+ * @secid: security id for identifying the object to test
+ * @field: audit rule flags given from user-space
+ * @op: required testing operator
+ * @vrule: smack internal rule presentation
+ * @actx: audit context associated with the check
+ *
+ * The core Audit hook. It's used to take the decision of
+ * whether to audit or not to audit a given object.
+ */
+static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule,
+				  struct audit_context *actx)
+{
+	struct smack_known *skp;
+	char *rule = vrule;
+
+	if (unlikely(!rule)) {
+		WARN_ONCE(1, "Smack: missing rule\n");
+		return -ENOENT;
+	}
+
+	if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
+		return 0;
+
+	skp = smack_from_secid(secid);
+
+	/*
+	 * No need to do string comparisons. If a match occurs,
+	 * both pointers will point to the same smack_known
+	 * label.
+	 */
+	if (op == Audit_equal)
+		return (rule == skp->smk_known);
+	if (op == Audit_not_equal)
+		return (rule != skp->smk_known);
+
+	return 0;
+}
+
+/**
+ * smack_audit_rule_free - free smack rule representation
+ * @vrule: rule to be freed.
+ *
+ * No memory was allocated.
+ */
+static void smack_audit_rule_free(void *vrule)
+{
+	/* No-op */
+}
+
+#endif /* CONFIG_AUDIT */
+
+/**
+ * smack_ismaclabel - check if xattr @name references a smack MAC label
+ * @name: Full xattr name to check.
+ */
+static int smack_ismaclabel(const char *name)
+{
+	return (strcmp(name, XATTR_SMACK_SUFFIX) == 0);
+}
+
+/**
+ * smack_secid_to_secctx - return the smack label for a secid
+ * @secid: incoming integer
+ * @secdata: destination
+ * @seclen: how long it is
+ *
+ * Exists for networking code.
+ */
+static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+	struct smack_known *skp = smack_from_secid(secid);
+
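+	/*
+	 * The label is returned by reference rather than copied,
+	 * which is why smack_release_secctx() has nothing to free.
+	 */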
+	if (secdata)
+		*secdata = skp->smk_known;
+	*seclen = strlen(skp->smk_known);
+	return 0;
+}
+
+/**
+ * smack_secctx_to_secid - return the secid for a smack label
+ * @secdata: smack label
+ * @seclen: how long result is
+ * @secid: outgoing integer
+ *
+ * Exists for audit and networking code.
+ */
+static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
+{
+	struct smack_known *skp = smk_find_entry(secdata);
+
+	if (skp)
+		*secid = skp->smk_secid;
+	else
+		*secid = 0;
+	return 0;
+}
+
+/**
+ * smack_release_secctx - don't do anything.
+ * @secdata: unused
+ * @seclen: unused
+ *
+ * Exists because the hook is required; there is nothing to release.
+ */
+static void smack_release_secctx(char *secdata, u32 seclen)
+{
+}
+
+static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+	return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx, ctxlen, 0);
+}
+
+static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+	return __vfs_setxattr_noperm(dentry, XATTR_NAME_SMACK, ctx, ctxlen, 0);
+}
+
+static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+	int len;
+
+	len = smack_inode_getsecurity(inode, XATTR_SMACK_SUFFIX, ctx, true);
+
+	if (len < 0)
+		return len;
+	*ctxlen = len;
+	return 0;
+}
+
+static struct security_hook_list smack_hooks[] = {
+	LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check),
+	LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
+	LSM_HOOK_INIT(syslog, smack_syslog),
+
+	LSM_HOOK_INIT(sb_alloc_security, smack_sb_alloc_security),
+	LSM_HOOK_INIT(sb_free_security, smack_sb_free_security),
+	LSM_HOOK_INIT(sb_copy_data, smack_sb_copy_data),
+	LSM_HOOK_INIT(sb_kern_mount, smack_sb_kern_mount),
+	LSM_HOOK_INIT(sb_statfs, smack_sb_statfs),
+	LSM_HOOK_INIT(sb_set_mnt_opts, smack_set_mnt_opts),
+	LSM_HOOK_INIT(sb_parse_opts_str, smack_parse_opts_str),
+
+	LSM_HOOK_INIT(bprm_set_creds, smack_bprm_set_creds),
+	LSM_HOOK_INIT(bprm_committing_creds, smack_bprm_committing_creds),
+	LSM_HOOK_INIT(bprm_secureexec, smack_bprm_secureexec),
+
+	LSM_HOOK_INIT(inode_alloc_security, smack_inode_alloc_security),
+	LSM_HOOK_INIT(inode_free_security, smack_inode_free_security),
+	LSM_HOOK_INIT(inode_init_security, smack_inode_init_security),
+	LSM_HOOK_INIT(inode_link, smack_inode_link),
+	LSM_HOOK_INIT(inode_unlink, smack_inode_unlink),
+	LSM_HOOK_INIT(inode_rmdir, smack_inode_rmdir),
+	LSM_HOOK_INIT(inode_rename, smack_inode_rename),
+	LSM_HOOK_INIT(inode_permission, smack_inode_permission),
+	LSM_HOOK_INIT(inode_setattr, smack_inode_setattr),
+	LSM_HOOK_INIT(inode_getattr, smack_inode_getattr),
+	LSM_HOOK_INIT(inode_setxattr, smack_inode_setxattr),
+	LSM_HOOK_INIT(inode_post_setxattr, smack_inode_post_setxattr),
+	LSM_HOOK_INIT(inode_getxattr, smack_inode_getxattr),
+	LSM_HOOK_INIT(inode_removexattr, smack_inode_removexattr),
+	LSM_HOOK_INIT(inode_getsecurity, smack_inode_getsecurity),
+	LSM_HOOK_INIT(inode_setsecurity, smack_inode_setsecurity),
+	LSM_HOOK_INIT(inode_listsecurity, smack_inode_listsecurity),
+	LSM_HOOK_INIT(inode_getsecid, smack_inode_getsecid),
+
+	LSM_HOOK_INIT(file_permission, smack_file_permission),
+	LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security),
+	LSM_HOOK_INIT(file_free_security, smack_file_free_security),
+	LSM_HOOK_INIT(file_ioctl, smack_file_ioctl),
+	LSM_HOOK_INIT(file_lock, smack_file_lock),
+	LSM_HOOK_INIT(file_fcntl, smack_file_fcntl),
+	LSM_HOOK_INIT(mmap_file, smack_mmap_file),
+	LSM_HOOK_INIT(mmap_addr, cap_mmap_addr),
+	LSM_HOOK_INIT(file_set_fowner, smack_file_set_fowner),
+	LSM_HOOK_INIT(file_send_sigiotask, smack_file_send_sigiotask),
+	LSM_HOOK_INIT(file_receive, smack_file_receive),
+
+	LSM_HOOK_INIT(file_open, smack_file_open),
+
+	LSM_HOOK_INIT(cred_alloc_blank, smack_cred_alloc_blank),
+	LSM_HOOK_INIT(cred_free, smack_cred_free),
+	LSM_HOOK_INIT(cred_prepare, smack_cred_prepare),
+	LSM_HOOK_INIT(cred_transfer, smack_cred_transfer),
+	LSM_HOOK_INIT(kernel_act_as, smack_kernel_act_as),
+	LSM_HOOK_INIT(kernel_create_files_as, smack_kernel_create_files_as),
+	LSM_HOOK_INIT(task_setpgid, smack_task_setpgid),
+	LSM_HOOK_INIT(task_getpgid, smack_task_getpgid),
+	LSM_HOOK_INIT(task_getsid, smack_task_getsid),
+	LSM_HOOK_INIT(task_getsecid, smack_task_getsecid),
+	LSM_HOOK_INIT(task_setnice, smack_task_setnice),
+	LSM_HOOK_INIT(task_setioprio, smack_task_setioprio),
+	LSM_HOOK_INIT(task_getioprio, smack_task_getioprio),
+	LSM_HOOK_INIT(task_setscheduler, smack_task_setscheduler),
+	LSM_HOOK_INIT(task_getscheduler, smack_task_getscheduler),
+	LSM_HOOK_INIT(task_movememory, smack_task_movememory),
+	LSM_HOOK_INIT(task_kill, smack_task_kill),
+	LSM_HOOK_INIT(task_wait, smack_task_wait),
+	LSM_HOOK_INIT(task_to_inode, smack_task_to_inode),
+
+	LSM_HOOK_INIT(ipc_permission, smack_ipc_permission),
+	LSM_HOOK_INIT(ipc_getsecid, smack_ipc_getsecid),
+
+	LSM_HOOK_INIT(msg_msg_alloc_security, smack_msg_msg_alloc_security),
+	LSM_HOOK_INIT(msg_msg_free_security, smack_msg_msg_free_security),
+
+	LSM_HOOK_INIT(msg_queue_alloc_security, smack_msg_queue_alloc_security),
+	LSM_HOOK_INIT(msg_queue_free_security, smack_msg_queue_free_security),
+	LSM_HOOK_INIT(msg_queue_associate, smack_msg_queue_associate),
+	LSM_HOOK_INIT(msg_queue_msgctl, smack_msg_queue_msgctl),
+	LSM_HOOK_INIT(msg_queue_msgsnd, smack_msg_queue_msgsnd),
+	LSM_HOOK_INIT(msg_queue_msgrcv, smack_msg_queue_msgrcv),
+
+	LSM_HOOK_INIT(shm_alloc_security, smack_shm_alloc_security),
+	LSM_HOOK_INIT(shm_free_security, smack_shm_free_security),
+	LSM_HOOK_INIT(shm_associate, smack_shm_associate),
+	LSM_HOOK_INIT(shm_shmctl, smack_shm_shmctl),
+	LSM_HOOK_INIT(shm_shmat, smack_shm_shmat),
+
+	LSM_HOOK_INIT(sem_alloc_security, smack_sem_alloc_security),
+	LSM_HOOK_INIT(sem_free_security, smack_sem_free_security),
+	LSM_HOOK_INIT(sem_associate, smack_sem_associate),
+	LSM_HOOK_INIT(sem_semctl, smack_sem_semctl),
+	LSM_HOOK_INIT(sem_semop, smack_sem_semop),
+
+	LSM_HOOK_INIT(d_instantiate, smack_d_instantiate),
+
+	LSM_HOOK_INIT(getprocattr, smack_getprocattr),
+	LSM_HOOK_INIT(setprocattr, smack_setprocattr),
+
+	LSM_HOOK_INIT(unix_stream_connect, smack_unix_stream_connect),
+	LSM_HOOK_INIT(unix_may_send, smack_unix_may_send),
+
+	LSM_HOOK_INIT(socket_post_create, smack_socket_post_create),
+#ifdef SMACK_IPV6_PORT_LABELING
+	LSM_HOOK_INIT(socket_bind, smack_socket_bind),
+#endif
+	LSM_HOOK_INIT(socket_connect, smack_socket_connect),
+	LSM_HOOK_INIT(socket_sendmsg, smack_socket_sendmsg),
+	LSM_HOOK_INIT(socket_sock_rcv_skb, smack_socket_sock_rcv_skb),
+	LSM_HOOK_INIT(socket_getpeersec_stream, smack_socket_getpeersec_stream),
+	LSM_HOOK_INIT(socket_getpeersec_dgram, smack_socket_getpeersec_dgram),
+	LSM_HOOK_INIT(sk_alloc_security, smack_sk_alloc_security),
+	LSM_HOOK_INIT(sk_free_security, smack_sk_free_security),
+	LSM_HOOK_INIT(sock_graft, smack_sock_graft),
+	LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request),
+	LSM_HOOK_INIT(inet_csk_clone, smack_inet_csk_clone),
+
+	/* Key management security hooks */
+#ifdef CONFIG_KEYS
+	LSM_HOOK_INIT(key_alloc, smack_key_alloc),
+	LSM_HOOK_INIT(key_free, smack_key_free),
+	LSM_HOOK_INIT(key_permission, smack_key_permission),
+	LSM_HOOK_INIT(key_getsecurity, smack_key_getsecurity),
+#endif /* CONFIG_KEYS */
+
+	/* Audit hooks */
+#ifdef CONFIG_AUDIT
+	LSM_HOOK_INIT(audit_rule_init, smack_audit_rule_init),
+	LSM_HOOK_INIT(audit_rule_known, smack_audit_rule_known),
+	LSM_HOOK_INIT(audit_rule_match, smack_audit_rule_match),
+	LSM_HOOK_INIT(audit_rule_free, smack_audit_rule_free),
+#endif /* CONFIG_AUDIT */
+
+	LSM_HOOK_INIT(ismaclabel, smack_ismaclabel),
+	LSM_HOOK_INIT(secid_to_secctx, smack_secid_to_secctx),
+	LSM_HOOK_INIT(secctx_to_secid, smack_secctx_to_secid),
+	LSM_HOOK_INIT(release_secctx, smack_release_secctx),
+	LSM_HOOK_INIT(inode_notifysecctx, smack_inode_notifysecctx),
+	LSM_HOOK_INIT(inode_setsecctx, smack_inode_setsecctx),
+	LSM_HOOK_INIT(inode_getsecctx, smack_inode_getsecctx),
+};
+
+static __init void init_smack_known_list(void)
+{
+	/*
+	 * Initialize rule list locks
+	 */
+	mutex_init(&smack_known_huh.smk_rules_lock);
+	mutex_init(&smack_known_hat.smk_rules_lock);
+	mutex_init(&smack_known_floor.smk_rules_lock);
+	mutex_init(&smack_known_star.smk_rules_lock);
+	mutex_init(&smack_known_invalid.smk_rules_lock);
+	mutex_init(&smack_known_web.smk_rules_lock);
+	/*
+	 * Initialize rule lists
+	 */
+	INIT_LIST_HEAD(&smack_known_huh.smk_rules);
+	INIT_LIST_HEAD(&smack_known_hat.smk_rules);
+	INIT_LIST_HEAD(&smack_known_star.smk_rules);
+	INIT_LIST_HEAD(&smack_known_floor.smk_rules);
+	INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
+	INIT_LIST_HEAD(&smack_known_web.smk_rules);
+	/*
+	 * Create the known labels list
+	 */
+	smk_insert_entry(&smack_known_huh);
+	smk_insert_entry(&smack_known_hat);
+	smk_insert_entry(&smack_known_star);
+	smk_insert_entry(&smack_known_floor);
+	smk_insert_entry(&smack_known_invalid);
+	smk_insert_entry(&smack_known_web);
+}
+
+/**
+ * smack_init - initialize the smack system
+ *
+ * Returns 0
+ */
+static __init int smack_init(void)
+{
+	struct cred *cred;
+	struct task_smack *tsp;
+
+	if (!security_module_enable("smack"))
+		return 0;
+
+	smack_inode_cache = KMEM_CACHE(inode_smack, 0);
+	if (!smack_inode_cache)
+		return -ENOMEM;
+
+	tsp = new_task_smack(&smack_known_floor, &smack_known_floor,
+				GFP_KERNEL);
+	if (tsp == NULL) {
+		kmem_cache_destroy(smack_inode_cache);
+		return -ENOMEM;
+	}
+
+	smack_enabled = 1;
+
+	pr_info("Smack:  Initializing.\n");
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+	pr_info("Smack:  Netfilter enabled.\n");
+#endif
+#ifdef SMACK_IPV6_PORT_LABELING
+	pr_info("Smack:  IPv6 port labeling enabled.\n");
+#endif
+#ifdef SMACK_IPV6_SECMARK_LABELING
+	pr_info("Smack:  IPv6 Netfilter enabled.\n");
+#endif
+
+	/*
+	 * Set the security state for the initial task.
+	 */
+	cred = (struct cred *) current->cred;
+	cred->security = tsp;
+
+	/* initialize the smack_known_list */
+	init_smack_known_list();
+
+	/*
+	 * Register with LSM
+	 */
+	security_add_hooks(smack_hooks, ARRAY_SIZE(smack_hooks));
+
+	return 0;
+}
+
+/*
+ * Smack requires early initialization in order to label
+ * all processes and objects when they are created.
+ */
+security_initcall(smack_init);
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
new file mode 100644
index 0000000..aa6bf1b
--- /dev/null
+++ b/security/smack/smack_netfilter.c
@@ -0,0 +1,93 @@
+/*
+ *  Simplified MAC Kernel (smack) security module
+ *
+ *  This file contains the Smack netfilter implementation
+ *
+ *  Author:
+ *	Casey Schaufler <casey@schaufler-ca.com>
+ *
+ *  Copyright (C) 2014 Casey Schaufler <casey@schaufler-ca.com>
+ *  Copyright (C) 2014 Intel Corporation.
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License version 2,
+ *	as published by the Free Software Foundation.
+ */
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netdevice.h>
+#include <net/inet_sock.h>
+#include "smack.h"
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+static unsigned int smack_ipv6_output(void *priv,
+					struct sk_buff *skb,
+					const struct nf_hook_state *state)
+{
+	struct sock *sk = skb_to_full_sk(skb);
+	struct socket_smack *ssp;
+	struct smack_known *skp;
+
+	if (sk && sk->sk_security) {
+		ssp = sk->sk_security;
+		skp = ssp->smk_out;
+		skb->secmark = skp->smk_secid;
+	}
+
+	return NF_ACCEPT;
+}
+#endif	/* IPV6 */
+
+static unsigned int smack_ipv4_output(void *priv,
+					struct sk_buff *skb,
+					const struct nf_hook_state *state)
+{
+	struct sock *sk = skb_to_full_sk(skb);
+	struct socket_smack *ssp;
+	struct smack_known *skp;
+
+	if (sk && sk->sk_security) {
+		ssp = sk->sk_security;
+		skp = ssp->smk_out;
+		skb->secmark = skp->smk_secid;
+	}
+
+	return NF_ACCEPT;
+}
+
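+/*
+ * Hook at LOCAL_OUT so that outgoing packets carry the sending
+ * socket's Smack label in skb->secmark.  The SELinux netfilter
+ * priority slots are reused, as only one major LSM runs at a time.
+ */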
+static struct nf_hook_ops smack_nf_ops[] = {
+	{
+		.hook =		smack_ipv4_output,
+		.pf =		NFPROTO_IPV4,
+		.hooknum =	NF_INET_LOCAL_OUT,
+		.priority =	NF_IP_PRI_SELINUX_FIRST,
+	},
+#if IS_ENABLED(CONFIG_IPV6)
+	{
+		.hook =		smack_ipv6_output,
+		.pf =		NFPROTO_IPV6,
+		.hooknum =	NF_INET_LOCAL_OUT,
+		.priority =	NF_IP6_PRI_SELINUX_FIRST,
+	},
+#endif	/* IPV6 */
+};
+
+static int __init smack_nf_ip_init(void)
+{
+	int err;
+
+	if (smack_enabled == 0)
+		return 0;
+
+	printk(KERN_DEBUG "Smack: Registering netfilter hooks\n");
+
+	err = nf_register_hooks(smack_nf_ops, ARRAY_SIZE(smack_nf_ops));
+	if (err)
+		pr_info("Smack: nf_register_hooks: error %d\n", err);
+
+	return 0;
+}
+
+__initcall(smack_nf_ip_init);
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
new file mode 100644
index 0000000..94bd9e4
--- /dev/null
+++ b/security/smack/smackfs.c
@@ -0,0 +1,3069 @@
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *  	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation, version 2.
+ *
+ * Authors:
+ * 	Casey Schaufler <casey@schaufler-ca.com>
+ * 	Ahmed S. Darwish <darwish.07@gmail.com>
+ *
+ * Special thanks to the authors of selinuxfs.
+ *
+ *	Karl MacMillan <kmacmillan@tresys.com>
+ *	James Morris <jmorris@redhat.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <net/net_namespace.h>
+#include <net/cipso_ipv4.h>
+#include <linux/seq_file.h>
+#include <linux/ctype.h>
+#include <linux/audit.h>
+#include <linux/magic.h>
+#include "smack.h"
+
+#define BEBITS	(sizeof(__be32) * 8)
+/*
+ * smackfs pseudo filesystem.
+ */
+
+enum smk_inos {
+	SMK_ROOT_INO	= 2,
+	SMK_LOAD	= 3,	/* load policy */
+	SMK_CIPSO	= 4,	/* load label -> CIPSO mapping */
+	SMK_DOI		= 5,	/* CIPSO DOI */
+	SMK_DIRECT	= 6,	/* CIPSO level indicating direct label */
+	SMK_AMBIENT	= 7,	/* internet ambient label */
+	SMK_NET4ADDR	= 8,	/* single label hosts */
+	SMK_ONLYCAP	= 9,	/* the only "capable" label */
+	SMK_LOGGING	= 10,	/* logging */
+	SMK_LOAD_SELF	= 11,	/* task specific rules */
+	SMK_ACCESSES	= 12,	/* access policy */
+	SMK_MAPPED	= 13,	/* CIPSO level indicating mapped label */
+	SMK_LOAD2	= 14,	/* load policy with long labels */
+	SMK_LOAD_SELF2	= 15,	/* load task specific rules with long labels */
+	SMK_ACCESS2	= 16,	/* make an access check with long labels */
+	SMK_CIPSO2	= 17,	/* load long label -> CIPSO mapping */
+	SMK_REVOKE_SUBJ	= 18,	/* set rules with subject label to '-' */
+	SMK_CHANGE_RULE	= 19,	/* change or add rules (long labels) */
+	SMK_SYSLOG	= 20,	/* change syslog label */
+	SMK_PTRACE	= 21,	/* set ptrace rule */
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+	SMK_UNCONFINED	= 22,	/* define an unconfined label */
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+	SMK_NET6ADDR	= 23,	/* single label IPv6 hosts */
+#endif /* CONFIG_IPV6 */
+	SMK_RELABEL_SELF = 24, /* relabel possible without CAP_MAC_ADMIN */
+};
+
+/*
+ * List locks
+ */
+static DEFINE_MUTEX(smack_cipso_lock);
+static DEFINE_MUTEX(smack_ambient_lock);
+static DEFINE_MUTEX(smk_net4addr_lock);
+#if IS_ENABLED(CONFIG_IPV6)
+static DEFINE_MUTEX(smk_net6addr_lock);
+#endif /* CONFIG_IPV6 */
+
+/*
+ * This is the "ambient" label for network traffic.
+ * If it isn't somehow marked, use this.
+ * It can be reset via smackfs/ambient
+ */
+struct smack_known *smack_net_ambient;
+
+/*
+ * This is the level in a CIPSO header that indicates a
+ * smack label is contained directly in the category set.
+ * It can be reset via smackfs/direct
+ */
+int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
+
+/*
+ * This is the level in a CIPSO header that indicates a
+ * secid is contained directly in the category set.
+ * It can be reset via smackfs/mapped
+ */
+int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT;
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+/*
+ * Allow one label to be unconfined. This is for
+ * debugging and application bring-up purposes only.
+ * It is bad and wrong, but everyone seems to expect
+ * to have it.
+ */
+struct smack_known *smack_unconfined;
+#endif
+
+/*
+ * If this value is set restrict syslog use to the label specified.
+ * It can be reset via smackfs/syslog
+ */
+struct smack_known *smack_syslog_label;
+
+/*
+ * Ptrace current rule
+ * SMACK_PTRACE_DEFAULT    regular smack ptrace rules (/proc based)
+ * SMACK_PTRACE_EXACT      labels must match, but can be overridden with
+ *			   CAP_SYS_PTRACE
+ * SMACK_PTRACE_DRACONIAN  labels must match, CAP_SYS_PTRACE has no effect
+ */
+int smack_ptrace_rule = SMACK_PTRACE_DEFAULT;
+
+/*
+ * Certain IP addresses may be designated as single label hosts.
+ * Packets are sent there unlabeled, but only from tasks that
+ * can write to the specified label.
+ */
+
+LIST_HEAD(smk_net4addr_list);
+#if IS_ENABLED(CONFIG_IPV6)
+LIST_HEAD(smk_net6addr_list);
+#endif /* CONFIG_IPV6 */
+
+/*
+ * Rule lists are maintained for each label.
+ * This master list is just for reading /smack/load and /smack/load2.
+ */
+struct smack_master_list {
+	struct list_head	list;
+	struct smack_rule	*smk_rule;
+};
+
+static LIST_HEAD(smack_rule_list);
+
+struct smack_parsed_rule {
+	struct smack_known	*smk_subject;
+	struct smack_known	*smk_object;
+	int			smk_access1;
+	int			smk_access2;
+};
+
+static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
+
+/*
+ * Values for parsing cipso rules
+ * SMK_DIGITLEN: Length of a digit field in a rule.
+ * SMK_CIPSOMIN: Minimum possible cipso rule length.
+ * SMK_CIPSOMAX: Maximum possible cipso rule length.
+ */
+#define SMK_DIGITLEN 4
+#define SMK_CIPSOMIN (SMK_LABELLEN + 2 * SMK_DIGITLEN)
+#define SMK_CIPSOMAX (SMK_CIPSOMIN + SMACK_CIPSO_MAXCATNUM * SMK_DIGITLEN)
+
+/*
+ * Values for parsing MAC rules
+ * SMK_ACCESS: Maximum possible combination of access permissions
+ * SMK_ACCESSLEN: Maximum length for a rule access field
+ * SMK_LOADLEN: Smack rule length
+ */
+#define SMK_OACCESS	"rwxa"
+#define SMK_ACCESS	"rwxatl"
+#define SMK_OACCESSLEN	(sizeof(SMK_OACCESS) - 1)
+#define SMK_ACCESSLEN	(sizeof(SMK_ACCESS) - 1)
+#define SMK_OLOADLEN	(SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
+#define SMK_LOADLEN	(SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
+
+/*
+ * Strictly for CIPSO level manipulation.
+ * Set the category bit number in a smack label sized buffer.
+ */
+static inline void smack_catset_bit(unsigned int cat, char *catsetp)
+{
+	if (cat == 0 || cat > (SMK_CIPSOLEN * 8))
+		return;
+
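+	/* Categories are numbered from 1: bit 0x80 of byte 0 is category 1. */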
+	catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
+}
+
+/**
+ * smk_netlabel_audit_set - fill a netlbl_audit struct
+ * @nap: structure to fill
+ */
+static void smk_netlabel_audit_set(struct netlbl_audit *nap)
+{
+	struct smack_known *skp = smk_of_current();
+
+	nap->loginuid = audit_get_loginuid(current);
+	nap->sessionid = audit_get_sessionid(current);
+	nap->secid = skp->smk_secid;
+}
+
+/*
+ * Value for parsing single label host rules
+ * "1.2.3.4 X"
+ */
+#define SMK_NETLBLADDRMIN	9
+
+/**
+ * smk_set_access - add a rule to the rule list or replace an old rule
+ * @srp: the rule to add or replace
+ * @rule_list: the list of rules
+ * @rule_lock: the rule list lock
+ * @global: if non-zero, indicates a global rule
+ *
+ * Looks through the current subject/object/access list for
+ * the subject/object pair and replaces the access that was
+ * there. If the pair isn't found add it with the specified
+ * access.
+ *
+ * Returns 0 if nothing goes wrong or -ENOMEM if it fails
+ * during the allocation of the new pair to add.
+ */
+static int smk_set_access(struct smack_parsed_rule *srp,
+				struct list_head *rule_list,
+				struct mutex *rule_lock, int global)
+{
+	struct smack_rule *sp;
+	struct smack_master_list *smlp;
+	int found = 0;
+	int rc = 0;
+
+	mutex_lock(rule_lock);
+
+	/*
+	 * Because the object label is less likely to match
+	 * than the subject label check it first
+	 */
+	list_for_each_entry_rcu(sp, rule_list, list) {
+		if (sp->smk_object == srp->smk_object &&
+		    sp->smk_subject == srp->smk_subject) {
+			found = 1;
+			sp->smk_access |= srp->smk_access1;
+			sp->smk_access &= ~srp->smk_access2;
+			break;
+		}
+	}
+
+	if (found == 0) {
+		sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+		if (sp == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		sp->smk_subject = srp->smk_subject;
+		sp->smk_object = srp->smk_object;
+		sp->smk_access = srp->smk_access1 & ~srp->smk_access2;
+
+		list_add_rcu(&sp->list, rule_list);
+		/*
+		 * If this is a global rule, as opposed to a task-local
+		 * one, and it is new, it also needs to be added to the
+		 * master list for reporting.
+		 */
+		if (global) {
+			smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
+			if (smlp != NULL) {
+				smlp->smk_rule = sp;
+				list_add_rcu(&smlp->list, &smack_rule_list);
+			} else
+				rc = -ENOMEM;
+		}
+	}
+
+out:
+	mutex_unlock(rule_lock);
+	return rc;
+}
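+
+/*
+ * Merge sketch (illustrative): applying a parsed rule with
+ * smk_access1 = MAY_EXEC and smk_access2 = MAY_WRITE to an existing
+ * rule that currently grants MAY_READ | MAY_WRITE does
+ *
+ *	access |= MAY_EXEC;	enable bits from smk_access1
+ *	access &= ~MAY_WRITE;	disable bits from smk_access2
+ *
+ * leaving MAY_READ | MAY_EXEC, i.e. "rx".
+ */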
+
+/**
+ * smk_perm_from_str - parse smack accesses from a text string
+ * @string: a text string that contains a Smack access code
+ *
+ * Returns an integer with respective bits set for specified accesses.
+ */
+static int smk_perm_from_str(const char *string)
+{
+	int perm = 0;
+	const char *cp;
+
+	for (cp = string; ; cp++)
+		switch (*cp) {
+		case '-':
+			break;
+		case 'r':
+		case 'R':
+			perm |= MAY_READ;
+			break;
+		case 'w':
+		case 'W':
+			perm |= MAY_WRITE;
+			break;
+		case 'x':
+		case 'X':
+			perm |= MAY_EXEC;
+			break;
+		case 'a':
+		case 'A':
+			perm |= MAY_APPEND;
+			break;
+		case 't':
+		case 'T':
+			perm |= MAY_TRANSMUTE;
+			break;
+		case 'l':
+		case 'L':
+			perm |= MAY_LOCK;
+			break;
+		case 'b':
+		case 'B':
+			perm |= MAY_BRINGUP;
+			break;
+		default:
+			return perm;
+		}
+}
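+
+/*
+ * For example (illustrative):
+ *
+ *	smk_perm_from_str("rwx");	MAY_READ | MAY_WRITE | MAY_EXEC
+ *	smk_perm_from_str("r-x");	'-' is skipped: MAY_READ | MAY_EXEC
+ *	smk_perm_from_str("rw\n");	stops at '\n': MAY_READ | MAY_WRITE
+ */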
+
+/**
+ * smk_fill_rule - Fill Smack rule from strings
+ * @subject: subject label string
+ * @object: object label string
+ * @access1: access string
+ * @access2: string with permissions to be removed
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ * @len: label length limit
+ *
+ * Returns 0 on success, appropriate error code on failure.
+ */
+static int smk_fill_rule(const char *subject, const char *object,
+				const char *access1, const char *access2,
+				struct smack_parsed_rule *rule, int import,
+				int len)
+{
+	const char *cp;
+	struct smack_known *skp;
+
+	if (import) {
+		rule->smk_subject = smk_import_entry(subject, len);
+		if (IS_ERR(rule->smk_subject))
+			return PTR_ERR(rule->smk_subject);
+
+		rule->smk_object = smk_import_entry(object, len);
+		if (IS_ERR(rule->smk_object))
+			return PTR_ERR(rule->smk_object);
+	} else {
+		cp = smk_parse_smack(subject, len);
+		if (IS_ERR(cp))
+			return PTR_ERR(cp);
+		skp = smk_find_entry(cp);
+		kfree(cp);
+		if (skp == NULL)
+			return -ENOENT;
+		rule->smk_subject = skp;
+
+		cp = smk_parse_smack(object, len);
+		if (IS_ERR(cp))
+			return PTR_ERR(cp);
+		skp = smk_find_entry(cp);
+		kfree(cp);
+		if (skp == NULL)
+			return -ENOENT;
+		rule->smk_object = skp;
+	}
+
+	rule->smk_access1 = smk_perm_from_str(access1);
+	if (access2)
+		rule->smk_access2 = smk_perm_from_str(access2);
+	else
+		rule->smk_access2 = ~rule->smk_access1;
+
+	return 0;
+}
+
+/**
+ * smk_parse_rule - parse Smack rule from load string
+ * @data: string to be parsed whose size is SMK_LOADLEN
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+static int smk_parse_rule(const char *data, struct smack_parsed_rule *rule,
+				int import)
+{
+	int rc;
+
+	rc = smk_fill_rule(data, data + SMK_LABELLEN,
+			   data + SMK_LABELLEN + SMK_LABELLEN, NULL, rule,
+			   import, SMK_LABELLEN);
+	return rc;
+}
+
+/**
+ * smk_parse_long_rule - parse Smack rule from rule string
+ * @data: string to be parsed, null terminated
+ * @rule: Will be filled with Smack parsed rule
+ * @import: if non-zero, import labels
+ * @tokens: number of substrings expected in data
+ *
+ * Returns number of processed bytes on success, -ERRNO on failure.
+ */
+static ssize_t smk_parse_long_rule(char *data, struct smack_parsed_rule *rule,
+				int import, int tokens)
+{
+	ssize_t cnt = 0;
+	char *tok[4];
+	int rc;
+	int i;
+
+	/*
+	 * Parsing the rule in-place, filling all white-spaces with '\0'
+	 */
+	for (i = 0; i < tokens; ++i) {
+		while (isspace(data[cnt]))
+			data[cnt++] = '\0';
+
+		if (data[cnt] == '\0')
+			/* Unexpected end of data */
+			return -EINVAL;
+
+		tok[i] = data + cnt;
+
+		while (data[cnt] && !isspace(data[cnt]))
+			++cnt;
+	}
+	while (isspace(data[cnt]))
+		data[cnt++] = '\0';
+
+	while (i < 4)
+		tok[i++] = NULL;
+
+	rc = smk_fill_rule(tok[0], tok[1], tok[2], tok[3], rule, import, 0);
+	return rc == 0 ? cnt : rc;
+}
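+
+/*
+ * Tokenizing sketch (illustrative): for the input "Foo Bar rwx\n"
+ * with tokens == 3, the buffer is rewritten in place to
+ * "Foo\0Bar\0rwx\0" with tok[0] = "Foo", tok[1] = "Bar" and
+ * tok[2] = "rwx", and the returned count includes the trailing
+ * whitespace that was consumed.
+ */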
+
+#define SMK_FIXED24_FMT	0	/* Fixed 24byte label format */
+#define SMK_LONG_FMT	1	/* Variable long label format */
+#define SMK_CHANGE_FMT	2	/* Rule modification format */
+/**
+ * smk_write_rules_list - write() for any /smack rule file
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ * @rule_list: the list of rules to write to
+ * @rule_lock: lock for the rule list
+ * @format: /smack/load or /smack/load2 or /smack/change-rule format.
+ *
+ * Get one smack access rule from above.
+ * The format for SMK_LONG_FMT is:
+ *	"subject<whitespace>object<whitespace>access[<whitespace>...]"
+ * The format for SMK_FIXED24_FMT is exactly:
+ *	"subject                 object                  rwxat"
+ * The format for SMK_CHANGE_FMT is:
+ *	"subject<whitespace>object<whitespace>
+ *	 acc_enable<whitespace>acc_disable[<whitespace>...]"
+ */
+static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
+					size_t count, loff_t *ppos,
+					struct list_head *rule_list,
+					struct mutex *rule_lock, int format)
+{
+	struct smack_parsed_rule rule;
+	char *data;
+	int rc;
+	int trunc = 0;
+	int tokens;
+	ssize_t cnt = 0;
+
+	/*
+	 * No partial writes.
+	 * Enough data must be present.
+	 */
+	if (*ppos != 0)
+		return -EINVAL;
+
+	if (format == SMK_FIXED24_FMT) {
+		/*
+		 * Minor hack for backward compatibility
+		 */
+		if (count < SMK_OLOADLEN || count > SMK_LOADLEN)
+			return -EINVAL;
+	} else {
+		if (count >= PAGE_SIZE) {
+			count = PAGE_SIZE - 1;
+			trunc = 1;
+		}
+	}
+
+	data = kmalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	/*
+	 * If only part of the user buffer was copied in,
+	 * trim back to the last newline so the data buffer
+	 * does not end in a partial rule
+	 */
+	if (trunc) {
+		while (count > 0 && (data[count - 1] != '\n'))
+			--count;
+		if (count == 0) {
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
+	data[count] = '\0';
+	tokens = (format == SMK_CHANGE_FMT ? 4 : 3);
+	while (cnt < count) {
+		if (format == SMK_FIXED24_FMT) {
+			rc = smk_parse_rule(data, &rule, 1);
+			if (rc < 0)
+				goto out;
+			cnt = count;
+		} else {
+			rc = smk_parse_long_rule(data + cnt, &rule, 1, tokens);
+			if (rc < 0)
+				goto out;
+			if (rc == 0) {
+				rc = -EINVAL;
+				goto out;
+			}
+			cnt += rc;
+		}
+
+		if (rule_list == NULL)
+			rc = smk_set_access(&rule, &rule.smk_subject->smk_rules,
+				&rule.smk_subject->smk_rules_lock, 1);
+		else
+			rc = smk_set_access(&rule, rule_list, rule_lock, 0);
+
+		if (rc)
+			goto out;
+	}
+
+	rc = cnt;
+out:
+	kfree(data);
+	return rc;
+}
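+
+/*
+ * Usage sketch (illustrative, not part of this file; the labels are
+ * made up): a process holding CAP_MAC_ADMIN can install a long-format
+ * rule through /smack/load2 with an ordinary write:
+ *
+ *	int fd = open("/smack/load2", O_WRONLY);
+ *	static const char rule[] = "WebServer UserData rwa\n";
+ *
+ *	if (fd >= 0) {
+ *		write(fd, rule, sizeof(rule) - 1);
+ *		close(fd);
+ *	}
+ */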
+
+/*
+ * Core logic for smackfs seq list operations.
+ */
+
+static void *smk_seq_start(struct seq_file *s, loff_t *pos,
+				struct list_head *head)
+{
+	struct list_head *list;
+	int i = *pos;
+
+	rcu_read_lock();
+	for (list = rcu_dereference(list_next_rcu(head));
+		list != head;
+		list = rcu_dereference(list_next_rcu(list))) {
+		if (i-- == 0)
+			return list;
+	}
+
+	return NULL;
+}
+
+static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
+				struct list_head *head)
+{
+	struct list_head *list = v;
+
+	++*pos;
+	list = rcu_dereference(list_next_rcu(list));
+
+	return (list == head) ? NULL : list;
+}
+
+static void smk_seq_stop(struct seq_file *s, void *v)
+{
+	rcu_read_unlock();
+}
+
+static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
+{
+	/*
+	 * Don't show any rules with label names too long for
+	 * interface file (/smack/load or /smack/load2)
+	 * because you should expect to be able to write
+	 * anything you read back.
+	 */
+	if (strlen(srp->smk_subject->smk_known) >= max ||
+	    strlen(srp->smk_object->smk_known) >= max)
+		return;
+
+	if (srp->smk_access == 0)
+		return;
+
+	seq_printf(s, "%s %s",
+		   srp->smk_subject->smk_known,
+		   srp->smk_object->smk_known);
+
+	seq_putc(s, ' ');
+
+	if (srp->smk_access & MAY_READ)
+		seq_putc(s, 'r');
+	if (srp->smk_access & MAY_WRITE)
+		seq_putc(s, 'w');
+	if (srp->smk_access & MAY_EXEC)
+		seq_putc(s, 'x');
+	if (srp->smk_access & MAY_APPEND)
+		seq_putc(s, 'a');
+	if (srp->smk_access & MAY_TRANSMUTE)
+		seq_putc(s, 't');
+	if (srp->smk_access & MAY_LOCK)
+		seq_putc(s, 'l');
+	if (srp->smk_access & MAY_BRINGUP)
+		seq_putc(s, 'b');
+
+	seq_putc(s, '\n');
+}
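+
+/*
+ * Output sketch (illustrative): a rule granting read, write and
+ * transmute between "Foo" and "Bar" is emitted as the line
+ *
+ *	Foo Bar rwt
+ *
+ * with the access letters always in the fixed order "rwxatlb".
+ */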
+
+/*
+ * Seq_file read operations for /smack/load
+ */
+
+static void *load2_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return smk_seq_start(s, pos, &smack_rule_list);
+}
+
+static void *load2_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	return smk_seq_next(s, v, pos, &smack_rule_list);
+}
+
+static int load_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smack_master_list *smlp =
+		list_entry_rcu(list, struct smack_master_list, list);
+
+	smk_rule_show(s, smlp->smk_rule, SMK_LABELLEN);
+
+	return 0;
+}
+
+static const struct seq_operations load_seq_ops = {
+	.start = load2_seq_start,
+	.next  = load2_seq_next,
+	.show  = load_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_load - open() for /smack/load
+ * @inode: inode structure representing file
+ * @file: "load" file pointer
+ *
+ * For reading, use load_seq_* seq_file reading operations.
+ */
+static int smk_open_load(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &load_seq_ops);
+}
+
+/**
+ * smk_write_load - write() for /smack/load
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	/*
+	 * Must have privilege.
+	 * No partial writes.
+	 * Enough data must be present.
+	 */
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+				    SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_load_ops = {
+	.open           = smk_open_load,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_load,
+	.release        = seq_release,
+};
+
+/**
+ * smk_cipso_doi - initialize the CIPSO domain
+ */
+static void smk_cipso_doi(void)
+{
+	int rc;
+	struct cipso_v4_doi *doip;
+	struct netlbl_audit nai;
+
+	smk_netlabel_audit_set(&nai);
+
+	rc = netlbl_cfg_map_del(NULL, PF_INET, NULL, NULL, &nai);
+	if (rc != 0)
+		printk(KERN_WARNING "%s:%d remove rc = %d\n",
+		       __func__, __LINE__, rc);
+
+	doip = kmalloc(sizeof(struct cipso_v4_doi), GFP_KERNEL);
+	if (doip == NULL)
+		panic("smack:  Failed to initialize cipso DOI.\n");
+	doip->map.std = NULL;
+	doip->doi = smk_cipso_doi_value;
+	doip->type = CIPSO_V4_MAP_PASS;
+	doip->tags[0] = CIPSO_V4_TAG_RBITMAP;
+	for (rc = 1; rc < CIPSO_V4_TAG_MAXCNT; rc++)
+		doip->tags[rc] = CIPSO_V4_TAG_INVALID;
+
+	rc = netlbl_cfg_cipsov4_add(doip, &nai);
+	if (rc != 0) {
+		printk(KERN_WARNING "%s:%d cipso add rc = %d\n",
+		       __func__, __LINE__, rc);
+		kfree(doip);
+		return;
+	}
+	rc = netlbl_cfg_cipsov4_map_add(doip->doi, NULL, NULL, NULL, &nai);
+	if (rc != 0) {
+		printk(KERN_WARNING "%s:%d map add rc = %d\n",
+		       __func__, __LINE__, rc);
+		kfree(doip);
+		return;
+	}
+}
+
+/**
+ * smk_unlbl_ambient - initialize the unlabeled domain
+ * @oldambient: previous domain string
+ */
+static void smk_unlbl_ambient(char *oldambient)
+{
+	int rc;
+	struct netlbl_audit nai;
+
+	smk_netlabel_audit_set(&nai);
+
+	if (oldambient != NULL) {
+		rc = netlbl_cfg_map_del(oldambient, PF_INET, NULL, NULL, &nai);
+		if (rc != 0)
+			printk(KERN_WARNING "%s:%d remove rc = %d\n",
+			       __func__, __LINE__, rc);
+	}
+	if (smack_net_ambient == NULL)
+		smack_net_ambient = &smack_known_floor;
+
+	rc = netlbl_cfg_unlbl_map_add(smack_net_ambient->smk_known, PF_INET,
+				      NULL, NULL, &nai);
+	if (rc != 0)
+		printk(KERN_WARNING "%s:%d add rc = %d\n",
+		       __func__, __LINE__, rc);
+}
+
+/*
+ * Seq_file read operations for /smack/cipso
+ */
+
+static void *cipso_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return smk_seq_start(s, pos, &smack_known_list);
+}
+
+static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	return smk_seq_next(s, v, pos, &smack_known_list);
+}
+
+/*
+ * Print cipso labels in format:
+ * label level[/cat[,cat]]
+ */
+static int cipso_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head  *list = v;
+	struct smack_known *skp =
+		list_entry_rcu(list, struct smack_known, list);
+	struct netlbl_lsm_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
+	char sep = '/';
+	int i;
+
+	/*
+	 * Don't show a label that could not have been set using
+	 * /smack/cipso. This is in support of the notion that
+	 * anything read from /smack/cipso ought to be writeable
+	 * to /smack/cipso.
+	 *
+	 * /smack/cipso2 should be used instead.
+	 */
+	if (strlen(skp->smk_known) >= SMK_LABELLEN)
+		return 0;
+
+	seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);
+
+	for (i = netlbl_catmap_walk(cmp, 0); i >= 0;
+	     i = netlbl_catmap_walk(cmp, i + 1)) {
+		seq_printf(s, "%c%d", sep, i);
+		sep = ',';
+	}
+
+	seq_putc(s, '\n');
+
+	return 0;
+}
+
+static const struct seq_operations cipso_seq_ops = {
+	.start = cipso_seq_start,
+	.next  = cipso_seq_next,
+	.show  = cipso_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_cipso - open() for /smack/cipso
+ * @inode: inode structure representing file
+ * @file: "cipso" file pointer
+ *
+ * Connect our cipso_seq_* operations with /smack/cipso
+ * file_operations
+ */
+static int smk_open_cipso(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &cipso_seq_ops);
+}
+
+/**
+ * smk_set_cipso - do the work for write() for cipso and cipso2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ * @format: /smack/cipso or /smack/cipso2
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos, int format)
+{
+	struct smack_known *skp;
+	struct netlbl_lsm_secattr ncats;
+	char mapcatset[SMK_CIPSOLEN];
+	int maplevel;
+	unsigned int cat;
+	int catlen;
+	ssize_t rc = -EINVAL;
+	char *data = NULL;
+	char *rule;
+	int ret;
+	int i;
+
+	/*
+	 * Must have privilege.
+	 * No partial writes.
+	 * Enough data must be present.
+	 */
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+	if (*ppos != 0)
+		return -EINVAL;
+	if (format == SMK_FIXED24_FMT &&
+	    (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX))
+		return -EINVAL;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto unlockedout;
+	}
+
+	data[count] = '\0';
+	rule = data;
+	/*
+	 * Only allow one writer at a time. Writes should be
+	 * quite rare and small in any case.
+	 */
+	mutex_lock(&smack_cipso_lock);
+
+	skp = smk_import_entry(rule, 0);
+	if (IS_ERR(skp)) {
+		rc = PTR_ERR(skp);
+		goto out;
+	}
+
+	if (format == SMK_FIXED24_FMT)
+		rule += SMK_LABELLEN;
+	else
+		rule += strlen(skp->smk_known) + 1;
+
+	ret = sscanf(rule, "%d", &maplevel);
+	if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
+		goto out;
+
+	rule += SMK_DIGITLEN;
+	ret = sscanf(rule, "%d", &catlen);
+	if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
+		goto out;
+
+	if (format == SMK_FIXED24_FMT &&
+	    count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN))
+		goto out;
+
+	memset(mapcatset, 0, sizeof(mapcatset));
+
+	for (i = 0; i < catlen; i++) {
+		rule += SMK_DIGITLEN;
+		ret = sscanf(rule, "%u", &cat);
+		if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
+			goto out;
+
+		smack_catset_bit(cat, mapcatset);
+	}
+
+	rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
+	if (rc >= 0) {
+		netlbl_catmap_free(skp->smk_netlabel.attr.mls.cat);
+		skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat;
+		skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
+		rc = count;
+	}
+
+out:
+	mutex_unlock(&smack_cipso_lock);
+unlockedout:
+	kfree(data);
+	return rc;
+}
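+
+/*
+ * Usage sketch (illustrative label and numbers): the level, the
+ * category count and each category must each sit in an
+ * SMK_DIGITLEN-wide field even in the long format, so writing
+ *
+ *	"Secret    5   2  10  20"
+ *
+ * to /smack/cipso2 maps the label "Secret" to CIPSO level 5 with
+ * categories 10 and 20.
+ */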
+
+/**
+ * smk_write_cipso - write() for /smack/cipso
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	return smk_set_cipso(file, buf, count, ppos, SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_cipso_ops = {
+	.open           = smk_open_cipso,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_cipso,
+	.release        = seq_release,
+};
+
+/*
+ * Seq_file read operations for /smack/cipso2
+ */
+
+/*
+ * Print cipso labels in format:
+ * label level[/cat[,cat]]
+ */
+static int cipso2_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head  *list = v;
+	struct smack_known *skp =
+		list_entry_rcu(list, struct smack_known, list);
+	struct netlbl_lsm_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
+	char sep = '/';
+	int i;
+
+	seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);
+
+	for (i = netlbl_catmap_walk(cmp, 0); i >= 0;
+	     i = netlbl_catmap_walk(cmp, i + 1)) {
+		seq_printf(s, "%c%d", sep, i);
+		sep = ',';
+	}
+
+	seq_putc(s, '\n');
+
+	return 0;
+}
+
+static const struct seq_operations cipso2_seq_ops = {
+	.start = cipso_seq_start,
+	.next  = cipso_seq_next,
+	.show  = cipso2_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_cipso2 - open() for /smack/cipso2
+ * @inode: inode structure representing file
+ * @file: "cipso2" file pointer
+ *
+ * Connect our cipso_seq_* operations with /smack/cipso2
+ * file_operations
+ */
+static int smk_open_cipso2(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &cipso2_seq_ops);
+}
+
+/**
+ * smk_write_cipso2 - write() for /smack/cipso2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso2(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	return smk_set_cipso(file, buf, count, ppos, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_cipso2_ops = {
+	.open           = smk_open_cipso2,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_cipso2,
+	.release        = seq_release,
+};
+
+/*
+ * Seq_file read operations for /smack/netlabel
+ */
+
+static void *net4addr_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return smk_seq_start(s, pos, &smk_net4addr_list);
+}
+
+static void *net4addr_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	return smk_seq_next(s, v, pos, &smk_net4addr_list);
+}
+
+/*
+ * Print host/label pairs
+ */
+static int net4addr_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smk_net4addr *skp =
+			list_entry_rcu(list, struct smk_net4addr, list);
+	char *kp = SMACK_CIPSO_OPTION;
+
+	if (skp->smk_label != NULL)
+		kp = skp->smk_label->smk_known;
+	seq_printf(s, "%pI4/%d %s\n", &skp->smk_host.s_addr,
+			skp->smk_masks, kp);
+
+	return 0;
+}
+
+static const struct seq_operations net4addr_seq_ops = {
+	.start = net4addr_seq_start,
+	.next  = net4addr_seq_next,
+	.show  = net4addr_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_net4addr - open() for /smack/netlabel
+ * @inode: inode structure representing file
+ * @file: "netlabel" file pointer
+ *
+ * Connect our net4addr_seq_* operations with /smack/netlabel
+ * file_operations
+ */
+static int smk_open_net4addr(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &net4addr_seq_ops);
+}
+
+/**
+ * smk_net4addr_insert
+ * @new: netlabel entry to insert
+ *
+ * This helper inserts a netlabel entry into the smk_net4addr_list,
+ * keeping it sorted by netmask length (longest to shortest).
+ * The caller must hold smk_net4addr_lock, taken in smk_write_net4addr.
+ */
+static void smk_net4addr_insert(struct smk_net4addr *new)
+{
+	struct smk_net4addr *m;
+	struct smk_net4addr *m_next;
+
+	if (list_empty(&smk_net4addr_list)) {
+		list_add_rcu(&new->list, &smk_net4addr_list);
+		return;
+	}
+
+	m = list_entry_rcu(smk_net4addr_list.next,
+			   struct smk_net4addr, list);
+
+	/* the comparison '>' is a bit hacky, but works */
+	if (new->smk_masks > m->smk_masks) {
+		list_add_rcu(&new->list, &smk_net4addr_list);
+		return;
+	}
+
+	list_for_each_entry_rcu(m, &smk_net4addr_list, list) {
+		if (list_is_last(&m->list, &smk_net4addr_list)) {
+			list_add_rcu(&new->list, &m->list);
+			return;
+		}
+		m_next = list_entry_rcu(m->list.next,
+					struct smk_net4addr, list);
+		if (new->smk_masks > m_next->smk_masks) {
+			list_add_rcu(&new->list, &m->list);
+			return;
+		}
+	}
+}
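+
+/*
+ * Ordering sketch (illustrative addresses): after inserting
+ * 10.0.0.0/8, 192.168.1.0/24 and 192.168.0.0/16 the list reads
+ *
+ *	192.168.1.0/24 -> 192.168.0.0/16 -> 10.0.0.0/8
+ *
+ * so the most specific prefix is always encountered first.
+ */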
+
+/**
+ * smk_write_net4addr - write() for /smack/netlabel
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one net4addr per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_net4addr(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct smk_net4addr *snp;
+	struct sockaddr_in newname;
+	char *smack;
+	struct smack_known *skp = NULL;
+	char *data;
+	char *host = (char *)&newname.sin_addr.s_addr;
+	int rc;
+	struct netlbl_audit audit_info;
+	struct in_addr mask;
+	unsigned int m;
+	unsigned int masks;
+	int found;
+	u32 mask_bits = (1<<31);
+	__be32 nsa;
+	u32 temp_mask;
+
+	/*
+	 * Must have privilege.
+	 * No partial writes.
+	 * Enough data must be present.
+	 * "<addr/mask, as a.b.c.d/e><space><label>"
+	 * "<addr, as a.b.c.d><space><label>"
+	 */
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+	if (*ppos != 0)
+		return -EINVAL;
+	if (count < SMK_NETLBLADDRMIN)
+		return -EINVAL;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto free_data_out;
+	}
+
+	smack = kzalloc(count + 1, GFP_KERNEL);
+	if (smack == NULL) {
+		rc = -ENOMEM;
+		goto free_data_out;
+	}
+
+	data[count] = '\0';
+
+	rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%u %s",
+		&host[0], &host[1], &host[2], &host[3], &masks, smack);
+	if (rc != 6) {
+		rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd %s",
+			&host[0], &host[1], &host[2], &host[3], smack);
+		if (rc != 5) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+		m = BEBITS;
+		masks = 32;
+	}
+	if (masks > BEBITS) {
+		rc = -EINVAL;
+		goto free_out;
+	}
+
+	/*
+	 * If smack begins with '-', it is an option; don't import it.
+	 */
+	if (smack[0] != '-') {
+		skp = smk_import_entry(smack, 0);
+		if (IS_ERR(skp)) {
+			rc = PTR_ERR(skp);
+			goto free_out;
+		}
+	} else {
+		/*
+		 * Only the -CIPSO option is supported for IPv4
+		 */
+		if (strcmp(smack, SMACK_CIPSO_OPTION) != 0) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+	}
+
+	for (m = masks, temp_mask = 0; m > 0; m--) {
+		temp_mask |= mask_bits;
+		mask_bits >>= 1;
+	}
+	mask.s_addr = cpu_to_be32(temp_mask);
+
+	newname.sin_addr.s_addr &= mask.s_addr;
+	/*
+	 * Only allow one writer at a time. Writes should be
+	 * quite rare and small in any case.
+	 */
+	mutex_lock(&smk_net4addr_lock);
+
+	nsa = newname.sin_addr.s_addr;
+	/* try to find if the prefix is already in the list */
+	found = 0;
+	list_for_each_entry_rcu(snp, &smk_net4addr_list, list) {
+		if (snp->smk_host.s_addr == nsa && snp->smk_masks == masks) {
+			found = 1;
+			break;
+		}
+	}
+	smk_netlabel_audit_set(&audit_info);
+
+	if (found == 0) {
+		snp = kzalloc(sizeof(*snp), GFP_KERNEL);
+		if (snp == NULL)
+			rc = -ENOMEM;
+		else {
+			rc = 0;
+			snp->smk_host.s_addr = newname.sin_addr.s_addr;
+			snp->smk_mask.s_addr = mask.s_addr;
+			snp->smk_label = skp;
+			snp->smk_masks = masks;
+			smk_net4addr_insert(snp);
+		}
+	} else {
+		/*
+		 * Delete the unlabeled entry, only if the previous label
+		 * wasn't the special CIPSO option
+		 */
+		if (snp->smk_label != NULL)
+			rc = netlbl_cfg_unlbl_static_del(&init_net, NULL,
+					&snp->smk_host, &snp->smk_mask,
+					PF_INET, &audit_info);
+		else
+			rc = 0;
+		snp->smk_label = skp;
+	}
+
+	/*
+	 * Now tell netlabel about the single label nature of
+	 * this host so that incoming packets get labeled,
+	 * but only if we didn't get the special CIPSO option.
+	 */
+	if (rc == 0 && skp != NULL)
+		rc = netlbl_cfg_unlbl_static_add(&init_net, NULL,
+			&snp->smk_host, &snp->smk_mask, PF_INET,
+			snp->smk_label->smk_secid, &audit_info);
+
+	if (rc == 0)
+		rc = count;
+
+	mutex_unlock(&smk_net4addr_lock);
+
+free_out:
+	kfree(smack);
+free_data_out:
+	kfree(data);
+
+	return rc;
+}
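+
+/*
+ * Usage sketch (illustrative addresses and label): single-label hosts
+ * and networks are configured by writing "addr[/mask] label" strings
+ * to /smack/netlabel, for example:
+ *
+ *	"192.168.1.0/24 WebFarm"	label a whole subnet
+ *	"127.0.0.1 -CIPSO"		use CIPSO labeling for loopback
+ */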
+
+static const struct file_operations smk_net4addr_ops = {
+	.open           = smk_open_net4addr,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_net4addr,
+	.release        = seq_release,
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+/*
+ * Seq_file read operations for /smack/netlabel6
+ */
+
+static void *net6addr_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return smk_seq_start(s, pos, &smk_net6addr_list);
+}
+
+static void *net6addr_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	return smk_seq_next(s, v, pos, &smk_net6addr_list);
+}
+
+/*
+ * Print host/label pairs
+ */
+static int net6addr_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smk_net6addr *skp =
+			 list_entry(list, struct smk_net6addr, list);
+
+	if (skp->smk_label != NULL)
+		seq_printf(s, "%pI6/%d %s\n", &skp->smk_host, skp->smk_masks,
+				skp->smk_label->smk_known);
+
+	return 0;
+}
+
+static const struct seq_operations net6addr_seq_ops = {
+	.start = net6addr_seq_start,
+	.next  = net6addr_seq_next,
+	.show  = net6addr_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_net6addr - open() for /smack/netlabel6
+ * @inode: inode structure representing file
+ * @file: "netlabel6" file pointer
+ *
+ * Connect our net6addr_seq_* operations with /smack/netlabel6
+ * file_operations
+ */
+static int smk_open_net6addr(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &net6addr_seq_ops);
+}
+
+/**
+ * smk_net6addr_insert
+ * @new: entry to insert
+ *
+ * This inserts an entry into the smk_net6addr_list,
+ * keeping it sorted by netmask length (longest to shortest).
+ * The caller must hold smk_net6addr_lock, taken in smk_write_net6addr.
+ */
+static void smk_net6addr_insert(struct smk_net6addr *new)
+{
+	struct smk_net6addr *m_next;
+	struct smk_net6addr *m;
+
+	if (list_empty(&smk_net6addr_list)) {
+		list_add_rcu(&new->list, &smk_net6addr_list);
+		return;
+	}
+
+	m = list_entry_rcu(smk_net6addr_list.next,
+			   struct smk_net6addr, list);
+
+	if (new->smk_masks > m->smk_masks) {
+		list_add_rcu(&new->list, &smk_net6addr_list);
+		return;
+	}
+
+	list_for_each_entry_rcu(m, &smk_net6addr_list, list) {
+		if (list_is_last(&m->list, &smk_net6addr_list)) {
+			list_add_rcu(&new->list, &m->list);
+			return;
+		}
+		m_next = list_entry_rcu(m->list.next,
+					struct smk_net6addr, list);
+		if (new->smk_masks > m_next->smk_masks) {
+			list_add_rcu(&new->list, &m->list);
+			return;
+		}
+	}
+}
+
+/**
+ * smk_write_net6addr - write() for /smack/netlabel6
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one net6addr per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_net6addr(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct smk_net6addr *snp;
+	struct in6_addr newname;
+	struct in6_addr fullmask;
+	struct smack_known *skp = NULL;
+	char *smack;
+	char *data;
+	int rc = 0;
+	int found = 0;
+	int i;
+	unsigned int scanned[8];
+	unsigned int m;
+	unsigned int mask = 128;
+
+	/*
+	 * Must have privilege.
+	 * No partial writes.
+	 * Enough data must be present.
+	 * "<addr/mask, as a:b:c:d:e:f:g:h/e><space><label>"
+	 * "<addr, as a:b:c:d:e:f:g:h><space><label>"
+	 */
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+	if (*ppos != 0)
+		return -EINVAL;
+	if (count < SMK_NETLBLADDRMIN)
+		return -EINVAL;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto free_data_out;
+	}
+
+	smack = kzalloc(count + 1, GFP_KERNEL);
+	if (smack == NULL) {
+		rc = -ENOMEM;
+		goto free_data_out;
+	}
+
+	data[count] = '\0';
+
+	i = sscanf(data, "%x:%x:%x:%x:%x:%x:%x:%x/%u %s",
+			&scanned[0], &scanned[1], &scanned[2], &scanned[3],
+			&scanned[4], &scanned[5], &scanned[6], &scanned[7],
+			&mask, smack);
+	if (i != 10) {
+		i = sscanf(data, "%x:%x:%x:%x:%x:%x:%x:%x %s",
+				&scanned[0], &scanned[1], &scanned[2],
+				&scanned[3], &scanned[4], &scanned[5],
+				&scanned[6], &scanned[7], smack);
+		if (i != 9) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+	}
+	if (mask > 128) {
+		rc = -EINVAL;
+		goto free_out;
+	}
+	for (i = 0; i < 8; i++) {
+		if (scanned[i] > 0xffff) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+		newname.s6_addr16[i] = htons(scanned[i]);
+	}
+
+	/*
+	 * If smack begins with '-', it is an option; don't import it.
+	 */
+	if (smack[0] != '-') {
+		skp = smk_import_entry(smack, 0);
+		if (IS_ERR(skp)) {
+			rc = PTR_ERR(skp);
+			goto free_out;
+		}
+	} else {
+		/*
+		 * Only -DELETE is supported for IPv6
+		 */
+		if (strcmp(smack, SMACK_DELETE_OPTION) != 0) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+	}
+
+	for (i = 0, m = mask; i < 8; i++) {
+		if (m >= 16) {
+			fullmask.s6_addr16[i] = 0xffff;
+			m -= 16;
+		} else if (m > 0) {
+			fullmask.s6_addr16[i] = (1 << m) - 1;
+			m = 0;
+		} else
+			fullmask.s6_addr16[i] = 0;
+		newname.s6_addr16[i] &= fullmask.s6_addr16[i];
+	}
+
+	/*
+	 * Only allow one writer at a time. Writes should be
+	 * quite rare and small in any case.
+	 */
+	mutex_lock(&smk_net6addr_lock);
+	/*
+	 * Try to find the prefix in the list
+	 */
+	list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
+		if (mask != snp->smk_masks)
+			continue;
+		for (found = 1, i = 0; i < 8; i++) {
+			if (newname.s6_addr16[i] !=
+			    snp->smk_host.s6_addr16[i]) {
+				found = 0;
+				break;
+			}
+		}
+		if (found == 1)
+			break;
+	}
+	if (found == 0) {
+		snp = kzalloc(sizeof(*snp), GFP_KERNEL);
+		if (snp == NULL)
+			rc = -ENOMEM;
+		else {
+			snp->smk_host = newname;
+			snp->smk_mask = fullmask;
+			snp->smk_masks = mask;
+			snp->smk_label = skp;
+			smk_net6addr_insert(snp);
+		}
+	} else {
+		snp->smk_label = skp;
+	}
+
+	if (rc == 0)
+		rc = count;
+
+	mutex_unlock(&smk_net6addr_lock);
+
+free_out:
+	kfree(smack);
+free_data_out:
+	kfree(data);
+
+	return rc;
+}
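+
+/*
+ * Usage sketch (illustrative): /smack/netlabel6 takes the same
+ * "addr[/mask] label" form, but with all eight 16-bit groups spelled
+ * out, since the parser does not accept the "::" shorthand:
+ *
+ *	"fd00:0:0:0:0:0:0:0/8 Backend"
+ *	"0:0:0:0:0:0:0:1 -DELETE"
+ */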
+
+static const struct file_operations smk_net6addr_ops = {
+	.open           = smk_open_net6addr,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_net6addr,
+	.release        = seq_release,
+};
+#endif /* CONFIG_IPV6 */
+
+/**
+ * smk_read_doi - read() for /smack/doi
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_doi(struct file *filp, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	char temp[80];
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	sprintf(temp, "%d", smk_cipso_doi_value);
+	rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+	return rc;
+}
+
+/**
+ * smk_write_doi - write() for /smack/doi
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_doi(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	char temp[80];
+	int i;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (count >= sizeof(temp) || count == 0)
+		return -EINVAL;
+
+	if (copy_from_user(temp, buf, count) != 0)
+		return -EFAULT;
+
+	temp[count] = '\0';
+
+	if (sscanf(temp, "%d", &i) != 1)
+		return -EINVAL;
+
+	smk_cipso_doi_value = i;
+
+	smk_cipso_doi();
+
+	return count;
+}
+
+static const struct file_operations smk_doi_ops = {
+	.read		= smk_read_doi,
+	.write		= smk_write_doi,
+	.llseek		= default_llseek,
+};
+
+/**
+ * smk_read_direct - read() for /smack/direct
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_direct(struct file *filp, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	char temp[80];
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	sprintf(temp, "%d", smack_cipso_direct);
+	rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+	return rc;
+}
+
+/**
+ * smk_write_direct - write() for /smack/direct
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_direct(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct smack_known *skp;
+	char temp[80];
+	int i;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (count >= sizeof(temp) || count == 0)
+		return -EINVAL;
+
+	if (copy_from_user(temp, buf, count) != 0)
+		return -EFAULT;
+
+	temp[count] = '\0';
+
+	if (sscanf(temp, "%d", &i) != 1)
+		return -EINVAL;
+
+	/*
+	 * Don't do anything if the value hasn't actually changed.
+	 * If it is changing reset the level on entries that were
+	 * set up to be direct when they were created.
+	 */
+	if (smack_cipso_direct != i) {
+		mutex_lock(&smack_known_lock);
+		list_for_each_entry_rcu(skp, &smack_known_list, list)
+			if (skp->smk_netlabel.attr.mls.lvl ==
+			    smack_cipso_direct)
+				skp->smk_netlabel.attr.mls.lvl = i;
+		smack_cipso_direct = i;
+		mutex_unlock(&smack_known_lock);
+	}
+
+	return count;
+}
+
+static const struct file_operations smk_direct_ops = {
+	.read		= smk_read_direct,
+	.write		= smk_write_direct,
+	.llseek		= default_llseek,
+};
+
+/**
+ * smk_read_mapped - read() for /smack/mapped
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_mapped(struct file *filp, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	char temp[80];
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	sprintf(temp, "%d", smack_cipso_mapped);
+	rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+	return rc;
+}
+
+/**
+ * smk_write_mapped - write() for /smack/mapped
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_mapped(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct smack_known *skp;
+	char temp[80];
+	int i;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (count >= sizeof(temp) || count == 0)
+		return -EINVAL;
+
+	if (copy_from_user(temp, buf, count) != 0)
+		return -EFAULT;
+
+	temp[count] = '\0';
+
+	if (sscanf(temp, "%d", &i) != 1)
+		return -EINVAL;
+
+	/*
+	 * Don't do anything if the value hasn't actually changed.
+	 * If it is changing reset the level on entries that were
+	 * set up to be mapped when they were created.
+	 */
+	if (smack_cipso_mapped != i) {
+		mutex_lock(&smack_known_lock);
+		list_for_each_entry_rcu(skp, &smack_known_list, list)
+			if (skp->smk_netlabel.attr.mls.lvl ==
+			    smack_cipso_mapped)
+				skp->smk_netlabel.attr.mls.lvl = i;
+		smack_cipso_mapped = i;
+		mutex_unlock(&smack_known_lock);
+	}
+
+	return count;
+}
+
+static const struct file_operations smk_mapped_ops = {
+	.read		= smk_read_mapped,
+	.write		= smk_write_mapped,
+	.llseek		= default_llseek,
+};
+
+/**
+ * smk_read_ambient - read() for /smack/ambient
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
+				size_t cn, loff_t *ppos)
+{
+	ssize_t rc;
+	int asize;
+
+	if (*ppos != 0)
+		return 0;
+	/*
+	 * Being careful to avoid a problem in the case where
+	 * smack_net_ambient gets changed in midstream.
+	 */
+	mutex_lock(&smack_ambient_lock);
+
+	asize = strlen(smack_net_ambient->smk_known) + 1;
+
+	if (cn >= asize)
+		rc = simple_read_from_buffer(buf, cn, ppos,
+					     smack_net_ambient->smk_known,
+					     asize);
+	else
+		rc = -EINVAL;
+
+	mutex_unlock(&smack_ambient_lock);
+
+	return rc;
+}
+
+/**
+ * smk_write_ambient - write() for /smack/ambient
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct smack_known *skp;
+	char *oldambient;
+	char *data;
+	int rc = count;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	skp = smk_import_entry(data, count);
+	if (IS_ERR(skp)) {
+		rc = PTR_ERR(skp);
+		goto out;
+	}
+
+	mutex_lock(&smack_ambient_lock);
+
+	oldambient = smack_net_ambient->smk_known;
+	smack_net_ambient = skp;
+	smk_unlbl_ambient(oldambient);
+
+	mutex_unlock(&smack_ambient_lock);
+
+out:
+	kfree(data);
+	return rc;
+}
+
+static const struct file_operations smk_ambient_ops = {
+	.read		= smk_read_ambient,
+	.write		= smk_write_ambient,
+	.llseek		= default_llseek,
+};
+
+/*
+ * Seq_file operations for /smack/onlycap
+ */
+static void *onlycap_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return smk_seq_start(s, pos, &smack_onlycap_list);
+}
+
+static void *onlycap_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	return smk_seq_next(s, v, pos, &smack_onlycap_list);
+}
+
+static int onlycap_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smack_known_list_elem *sklep =
+		list_entry_rcu(list, struct smack_known_list_elem, list);
+
+	seq_puts(s, sklep->smk_label->smk_known);
+	seq_putc(s, ' ');
+
+	return 0;
+}
+
+static const struct seq_operations onlycap_seq_ops = {
+	.start = onlycap_seq_start,
+	.next  = onlycap_seq_next,
+	.show  = onlycap_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+static int smk_open_onlycap(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &onlycap_seq_ops);
+}
+
+/**
+ * smk_list_swap_rcu - swap public list with a private one in RCU-safe way
+ * @public: public list
+ * @private: private list
+ *
+ * The caller must hold the appropriate mutex to prevent concurrent
+ * modifications to the public list. The private list is assumed not
+ * to be visible to other threads yet.
+ */
+static void smk_list_swap_rcu(struct list_head *public,
+			      struct list_head *private)
+{
+	struct list_head *first, *last;
+
+	if (list_empty(public)) {
+		list_splice_init_rcu(private, public, synchronize_rcu);
+	} else {
+		/* Remember public list before replacing it */
+		first = public->next;
+		last = public->prev;
+
+		/* Publish private list in place of public in RCU-safe way */
+		private->prev->next = public;
+		private->next->prev = public;
+		rcu_assign_pointer(public->next, private->next);
+		public->prev = private->prev;
+
+		synchronize_rcu();
+
+		/* When all readers are done with the old public list,
+		 * attach it in place of private */
+		private->next = first;
+		private->prev = last;
+		first->prev = private;
+		last->next = private;
+	}
+}
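+
+/*
+ * Sketch of the non-empty case (illustrative): with the public list
+ * P -> a -> b and the private list Q -> x, a reader traversing P sees
+ * either the old chain (a, b) or, once rcu_assign_pointer() publishes
+ * the new next pointer, the new chain (x), but never a mix of the
+ * two; the displaced nodes a and b are reattached to Q only after
+ * synchronize_rcu() guarantees that no reader still holds them.
+ */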
+
+/**
+ * smk_parse_label_list - parse a list of Smack labels, separated by spaces
+ * @data: the string to parse
+ * @list: destination list
+ *
+ * Returns zero on success or error code, as appropriate
+ */
+static int smk_parse_label_list(char *data, struct list_head *list)
+{
+	char *tok;
+	struct smack_known *skp;
+	struct smack_known_list_elem *sklep;
+
+	while ((tok = strsep(&data, " ")) != NULL) {
+		if (!*tok)
+			continue;
+
+		skp = smk_import_entry(tok, 0);
+		if (IS_ERR(skp))
+			return PTR_ERR(skp);
+
+		sklep = kzalloc(sizeof(*sklep), GFP_KERNEL);
+		if (sklep == NULL)
+			return -ENOMEM;
+
+		sklep->smk_label = skp;
+		list_add(&sklep->list, list);
+	}
+
+	return 0;
+}
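+
+/*
+ * For example (illustrative labels), parsing "System  User _" adds
+ * three entries to the destination list; runs of separators are
+ * skipped, and the first invalid label aborts with an error, leaving
+ * the entries added so far for the caller to release with
+ * smk_destroy_label_list().
+ */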
+
+/**
+ * smk_destroy_label_list - destroy a list of smack_known_list_elem
+ * @list: header pointer of the list to destroy
+ */
+void smk_destroy_label_list(struct list_head *list)
+{
+	struct smack_known_list_elem *sklep;
+	struct smack_known_list_elem *sklep2;
+
+	list_for_each_entry_safe(sklep, sklep2, list, list)
+		kfree(sklep);
+
+	INIT_LIST_HEAD(list);
+}
+
+/**
+ * smk_write_onlycap - write() for smackfs/onlycap
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	char *data;
+	LIST_HEAD(list_tmp);
+	int rc;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		kfree(data);
+		return -EFAULT;
+	}
+
+	rc = smk_parse_label_list(data, &list_tmp);
+	kfree(data);
+
+	/*
+	 * Clear the smack_onlycap on invalid label errors. This means
+	 * that we can pass a null string to unset the onlycap value.
+	 *
+	 * Importing will also reject a label beginning with '-',
+	 * so "-usecapabilities" will also work.
+	 *
+	 * But do so only on an invalid label, not on system errors.
+	 * The invalid label must come first to count as a clearing attempt.
+	 */
+	if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
+		mutex_lock(&smack_onlycap_lock);
+		smk_list_swap_rcu(&smack_onlycap_list, &list_tmp);
+		mutex_unlock(&smack_onlycap_lock);
+		rc = count;
+	}
+
+	smk_destroy_label_list(&list_tmp);
+
+	return rc;
+}
+
+static const struct file_operations smk_onlycap_ops = {
+	.open		= smk_open_onlycap,
+	.read		= seq_read,
+	.write		= smk_write_onlycap,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+/**
+ * smk_read_unconfined - read() for smackfs/unconfined
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_unconfined(struct file *filp, char __user *buf,
+					size_t cn, loff_t *ppos)
+{
+	char *smack = "";
+	ssize_t rc = -EINVAL;
+	int asize;
+
+	if (*ppos != 0)
+		return 0;
+
+	if (smack_unconfined != NULL)
+		smack = smack_unconfined->smk_known;
+
+	asize = strlen(smack) + 1;
+
+	if (cn >= asize)
+		rc = simple_read_from_buffer(buf, cn, ppos, smack, asize);
+
+	return rc;
+}
+
+/**
+ * smk_write_unconfined - write() for smackfs/unconfined
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_unconfined(struct file *file, const char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	char *data;
+	struct smack_known *skp;
+	int rc = count;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto freeout;
+	}
+
+	/*
+	 * Clear the smack_unconfined on invalid label errors. This means
+	 * that we can pass a null string to unset the unconfined value.
+	 *
+	 * Importing will also reject a label beginning with '-',
+	 * so "-confine" will also work.
+	 *
+	 * But do so only on invalid label, not on system errors.
+	 */
+	skp = smk_import_entry(data, count);
+	if (PTR_ERR(skp) == -EINVAL)
+		skp = NULL;
+	else if (IS_ERR(skp)) {
+		rc = PTR_ERR(skp);
+		goto freeout;
+	}
+
+	smack_unconfined = skp;
+
+freeout:
+	kfree(data);
+	return rc;
+}
+
+static const struct file_operations smk_unconfined_ops = {
+	.read		= smk_read_unconfined,
+	.write		= smk_write_unconfined,
+	.llseek		= default_llseek,
+};
+#endif /* CONFIG_SECURITY_SMACK_BRINGUP */
+
+/**
+ * smk_read_logging - read() for /smack/logging
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_logging(struct file *filp, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char temp[32];
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	sprintf(temp, "%d\n", log_policy);
+	rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+	return rc;
+}
+
+/**
+ * smk_write_logging - write() for /smack/logging
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_logging(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char temp[32];
+	int i;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (count >= sizeof(temp) || count == 0)
+		return -EINVAL;
+
+	if (copy_from_user(temp, buf, count) != 0)
+		return -EFAULT;
+
+	temp[count] = '\0';
+
+	if (sscanf(temp, "%d", &i) != 1)
+		return -EINVAL;
+	if (i < 0 || i > 3)
+		return -EINVAL;
+	log_policy = i;
+	return count;
+}
+
+static const struct file_operations smk_logging_ops = {
+	.read		= smk_read_logging,
+	.write		= smk_write_logging,
+	.llseek		= default_llseek,
+};
+
+/*
+ * Seq_file read operations for /smack/load-self
+ */
+
+static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_seq_start(s, pos, &tsp->smk_rules);
+}
+
+static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_seq_next(s, v, pos, &tsp->smk_rules);
+}
+
+static int load_self_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smack_rule *srp =
+		list_entry_rcu(list, struct smack_rule, list);
+
+	smk_rule_show(s, srp, SMK_LABELLEN);
+
+	return 0;
+}
+
+static const struct seq_operations load_self_seq_ops = {
+	.start = load_self_seq_start,
+	.next  = load_self_seq_next,
+	.show  = load_self_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_load_self - open() for /smack/load-self
+ * @inode: inode structure representing file
+ * @file: "load-self" file pointer
+ *
+ * For reading, use load_self_seq_* seq_file reading operations.
+ */
+static int smk_open_load_self(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &load_self_seq_ops);
+}
+
+/**
+ * smk_write_load_self - write() for /smack/load-self
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load_self(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
+				    &tsp->smk_rules_lock, SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_load_self_ops = {
+	.open           = smk_open_load_self,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_load_self,
+	.release        = seq_release,
+};
+
+/**
+ * smk_user_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ * @format: SMK_FIXED24_FMT or SMK_LONG_FMT
+ */
+static ssize_t smk_user_access(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos, int format)
+{
+	struct smack_parsed_rule rule;
+	char *data;
+	int res;
+
+	data = simple_transaction_get(file, buf, count);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	if (format == SMK_FIXED24_FMT) {
+		if (count < SMK_LOADLEN)
+			return -EINVAL;
+		res = smk_parse_rule(data, &rule, 0);
+	} else {
+		/*
+		 * simple_transaction_get() returns null-terminated data
+		 */
+		res = smk_parse_long_rule(data, &rule, 0, 3);
+	}
+
+	if (res >= 0)
+		res = smk_access(rule.smk_subject, rule.smk_object,
+				 rule.smk_access1, NULL);
+	else if (res != -ENOENT)
+		return res;
+
+	/*
+	 * smk_access() can return a value > 0 in the "bringup" case.
+	 */
+	data[0] = res >= 0 ? '1' : '0';
+	data[1] = '\0';
+
+	simple_transaction_set(file, 2);
+
+	if (format == SMK_FIXED24_FMT)
+		return SMK_LOADLEN;
+	return count;
+}
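+
+/*
+ * Transaction sketch (illustrative labels): writing "Foo Bar r" to
+ * /smack/access2 and reading the same descriptor back yields "1" if
+ * the loaded rules would grant the access and "0" otherwise:
+ *
+ *	write(fd, "Foo Bar r", 9);
+ *	read(fd, answer, 2);	answer[0] is '1' or '0'
+ */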
+
+/**
+ * smk_write_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	return smk_user_access(file, buf, count, ppos, SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_access_ops = {
+	.write		= smk_write_access,
+	.read		= simple_transaction_read,
+	.release	= simple_transaction_release,
+	.llseek		= generic_file_llseek,
+};
+
+/*
+ * Seq_file read operations for /smack/load2
+ */
+
+static int load2_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smack_master_list *smlp =
+		list_entry_rcu(list, struct smack_master_list, list);
+
+	smk_rule_show(s, smlp->smk_rule, SMK_LONGLABEL);
+
+	return 0;
+}
+
+static const struct seq_operations load2_seq_ops = {
+	.start = load2_seq_start,
+	.next  = load2_seq_next,
+	.show  = load2_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_load2 - open() for /smack/load2
+ * @inode: inode structure representing file
+ * @file: "load2" file pointer
+ *
+ * For reading, use load2_seq_* seq_file reading operations.
+ */
+static int smk_open_load2(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &load2_seq_ops);
+}
+
+/**
+ * smk_write_load2 - write() for /smack/load2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load2(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	/*
+	 * Must have privilege.
+	 */
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+				    SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_load2_ops = {
+	.open           = smk_open_load2,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_load2,
+	.release        = seq_release,
+};
+
+/*
+ * Seq_file read operations for /smack/load-self2
+ */
+
+static void *load_self2_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_seq_start(s, pos, &tsp->smk_rules);
+}
+
+static void *load_self2_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_seq_next(s, v, pos, &tsp->smk_rules);
+}
+
+static int load_self2_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smack_rule *srp =
+		list_entry_rcu(list, struct smack_rule, list);
+
+	smk_rule_show(s, srp, SMK_LONGLABEL);
+
+	return 0;
+}
+
+static const struct seq_operations load_self2_seq_ops = {
+	.start = load_self2_seq_start,
+	.next  = load_self2_seq_next,
+	.show  = load_self2_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_load_self2 - open() for /smack/load-self2
+ * @inode: inode structure representing file
+ * @file: "load" file pointer
+ *
+ * For reading, use load_seq_* seq_file reading operations.
+ */
+static int smk_open_load_self2(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &load_self2_seq_ops);
+}
+
+/**
+ * smk_write_load_self2 - write() for /smack/load-self2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load_self2(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
+				    &tsp->smk_rules_lock, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_load_self2_ops = {
+	.open           = smk_open_load_self2,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_load_self2,
+	.release        = seq_release,
+};
+
+/**
+ * smk_write_access2 - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access2(struct file *file, const char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	return smk_user_access(file, buf, count, ppos, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_access2_ops = {
+	.write		= smk_write_access2,
+	.read		= simple_transaction_read,
+	.release	= simple_transaction_release,
+	.llseek		= generic_file_llseek,
+};
+
+/**
+ * smk_write_revoke_subj - write() for /smack/revoke-subject
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_revoke_subj(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char *data;
+	const char *cp;
+	struct smack_known *skp;
+	struct smack_rule *sp;
+	struct list_head *rule_list;
+	struct mutex *rule_lock;
+	int rc = count;
+
+	if (*ppos != 0)
+		return -EINVAL;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (count == 0 || count > SMK_LONGLABEL)
+		return -EINVAL;
+
+	data = kzalloc(count, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto out_data;
+	}
+
+	cp = smk_parse_smack(data, count);
+	if (IS_ERR(cp)) {
+		rc = PTR_ERR(cp);
+		goto out_data;
+	}
+
+	skp = smk_find_entry(cp);
+	if (skp == NULL)
+		goto out_cp;
+
+	rule_list = &skp->smk_rules;
+	rule_lock = &skp->smk_rules_lock;
+
+	mutex_lock(rule_lock);
+
+	list_for_each_entry_rcu(sp, rule_list, list)
+		sp->smk_access = 0;
+
+	mutex_unlock(rule_lock);
+
+out_cp:
+	kfree(cp);
+out_data:
+	kfree(data);
+
+	return rc;
+}
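+
+/*
+ * Usage sketch (illustrative label): writing a single subject label
+ * such as "Untrusted" to /smack/revoke-subject zeroes the access
+ * field of every rule that names it as the subject, without removing
+ * the rules themselves.
+ */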
+
+static const struct file_operations smk_revoke_subj_ops = {
+	.write		= smk_write_revoke_subj,
+	.read		= simple_transaction_read,
+	.release	= simple_transaction_release,
+	.llseek		= generic_file_llseek,
+};
+
+/**
+ * smk_init_sysfs - initialize /sys/fs/smackfs
+ *
+ */
+static int smk_init_sysfs(void)
+{
+	return sysfs_create_mount_point(fs_kobj, "smackfs");
+}
+
+/**
+ * smk_write_change_rule - write() for /smack/change-rule
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_change_rule(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	/*
+	 * Must have privilege.
+	 */
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+				    SMK_CHANGE_FMT);
+}
+
+static const struct file_operations smk_change_rule_ops = {
+	.write		= smk_write_change_rule,
+	.read		= simple_transaction_read,
+	.release	= simple_transaction_release,
+	.llseek		= generic_file_llseek,
+};
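+
+/*
+ * Illustrative usage, an editor's sketch (labels are hypothetical and
+ * CAP_MAC_ADMIN is required): change-rule takes "subject object
+ * access-to-enable access-to-disable" and modifies an existing rule
+ * in place, here enabling read/execute while disabling write:
+ *
+ *	echo "MyLabel SharedLabel rx w" > /smack/change-rule
+ */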
+
+/**
+ * smk_read_syslog - read() for smackfs/syslog
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_syslog(struct file *filp, char __user *buf,
+				size_t cn, loff_t *ppos)
+{
+	struct smack_known *skp;
+	ssize_t rc = -EINVAL;
+	int asize;
+
+	if (*ppos != 0)
+		return 0;
+
+	if (smack_syslog_label == NULL)
+		skp = &smack_known_star;
+	else
+		skp = smack_syslog_label;
+
+	asize = strlen(skp->smk_known) + 1;
+
+	if (cn >= asize)
+		rc = simple_read_from_buffer(buf, cn, ppos, skp->smk_known,
+						asize);
+
+	return rc;
+}
+
+/**
+ * smk_write_syslog - write() for smackfs/syslog
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char *data;
+	struct smack_known *skp;
+	int rc = count;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0)
+		rc = -EFAULT;
+	else {
+		skp = smk_import_entry(data, count);
+		if (IS_ERR(skp))
+			rc = PTR_ERR(skp);
+		else
+			smack_syslog_label = skp;
+	}
+
+	kfree(data);
+	return rc;
+}
+
+static const struct file_operations smk_syslog_ops = {
+	.read		= smk_read_syslog,
+	.write		= smk_write_syslog,
+	.llseek		= default_llseek,
+};
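+
+/*
+ * Illustrative usage, an editor's sketch (the label is hypothetical):
+ * reading reports the label governing syslog access ("*" until one is
+ * set); writing, which requires CAP_MAC_ADMIN, installs a new
+ * smack_syslog_label:
+ *
+ *	cat /smack/syslog
+ *	echo MyLabel > /smack/syslog
+ */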
+
+/*
+ * Seq_file read operations for /smack/relabel-self
+ */
+
+static void *relabel_self_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_seq_start(s, pos, &tsp->smk_relabel);
+}
+
+static void *relabel_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct task_smack *tsp = current_security();
+
+	return smk_seq_next(s, v, pos, &tsp->smk_relabel);
+}
+
+static int relabel_self_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smack_known_list_elem *sklep =
+		list_entry(list, struct smack_known_list_elem, list);
+
+	seq_puts(s, sklep->smk_label->smk_known);
+	seq_putc(s, ' ');
+
+	return 0;
+}
+
+static const struct seq_operations relabel_self_seq_ops = {
+	.start = relabel_self_seq_start,
+	.next  = relabel_self_seq_next,
+	.show  = relabel_self_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_relabel_self - open() for /smack/relabel-self
+ * @inode: inode structure representing file
+ * @file: "relabel-self" file pointer
+ *
+ * Connect our relabel_self_seq_* operations with /smack/relabel-self
+ * file_operations
+ */
+static int smk_open_relabel_self(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &relabel_self_seq_ops);
+}
+
+/**
+ * smk_write_relabel_self - write() for /smack/relabel-self
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ * Returns number of bytes written or error code, as appropriate.
+ */
+static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct task_smack *tsp = current_security();
+	char *data;
+	int rc;
+	LIST_HEAD(list_tmp);
+
+	/*
+	 * Must have privilege.
+	 */
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	/*
+	 * The write must start at the beginning.
+	 */
+	if (*ppos != 0)
+		return -EINVAL;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		kfree(data);
+		return -EFAULT;
+	}
+
+	rc = smk_parse_label_list(data, &list_tmp);
+	kfree(data);
+
+	if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
+		smk_destroy_label_list(&tsp->smk_relabel);
+		list_splice(&list_tmp, &tsp->smk_relabel);
+		return count;
+	}
+
+	smk_destroy_label_list(&list_tmp);
+	return rc;
+}
+
+static const struct file_operations smk_relabel_self_ops = {
+	.open		= smk_open_relabel_self,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.write		= smk_write_relabel_self,
+	.release	= seq_release,
+};
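+
+/*
+ * Illustrative usage, an editor's sketch (labels are hypothetical and
+ * CAP_MAC_ADMIN is required): writing a space-separated list of
+ * labels defines the set of labels the writing task may later
+ * relabel itself to; reading shows the current list:
+ *
+ *	echo "LabelA LabelB" > /smack/relabel-self
+ */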
+
+/**
+ * smk_read_ptrace - read() for /smack/ptrace
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_ptrace(struct file *filp, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	char temp[32];
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	sprintf(temp, "%d\n", smack_ptrace_rule);
+	rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+	return rc;
+}
+
+/**
+ * smk_write_ptrace - write() for /smack/ptrace
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_ptrace(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char temp[32];
+	int i;
+
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	if (*ppos != 0 || count >= sizeof(temp) || count == 0)
+		return -EINVAL;
+
+	if (copy_from_user(temp, buf, count) != 0)
+		return -EFAULT;
+
+	temp[count] = '\0';
+
+	if (sscanf(temp, "%d", &i) != 1)
+		return -EINVAL;
+	if (i < SMACK_PTRACE_DEFAULT || i > SMACK_PTRACE_MAX)
+		return -EINVAL;
+	smack_ptrace_rule = i;
+
+	return count;
+}
+
+static const struct file_operations smk_ptrace_ops = {
+	.write		= smk_write_ptrace,
+	.read		= smk_read_ptrace,
+	.llseek		= default_llseek,
+};
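+
+/*
+ * Illustrative usage, an editor's sketch: the ptrace file holds a
+ * single integer between SMACK_PTRACE_DEFAULT and SMACK_PTRACE_MAX
+ * selecting how strictly Smack checks ptrace requests; writing
+ * requires CAP_MAC_ADMIN:
+ *
+ *	echo 2 > /smack/ptrace
+ *	cat /smack/ptrace
+ */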
+
+/**
+ * smk_fill_super - fill the smackfs superblock
+ * @sb: the empty superblock
+ * @data: unused
+ * @silent: unused
+ *
+ * Fill in the well known entries for the smack filesystem
+ *
+ * Returns 0 on success, an error code on failure
+ */
+static int smk_fill_super(struct super_block *sb, void *data, int silent)
+{
+	int rc;
+	struct inode *root_inode;
+
+	static struct tree_descr smack_files[] = {
+		[SMK_LOAD] = {
+			"load", &smk_load_ops, S_IRUGO|S_IWUSR},
+		[SMK_CIPSO] = {
+			"cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR},
+		[SMK_DOI] = {
+			"doi", &smk_doi_ops, S_IRUGO|S_IWUSR},
+		[SMK_DIRECT] = {
+			"direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
+		[SMK_AMBIENT] = {
+			"ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
+		[SMK_NET4ADDR] = {
+			"netlabel", &smk_net4addr_ops, S_IRUGO|S_IWUSR},
+		[SMK_ONLYCAP] = {
+			"onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
+		[SMK_LOGGING] = {
+			"logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
+		[SMK_LOAD_SELF] = {
+			"load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
+		[SMK_ACCESSES] = {
+			"access", &smk_access_ops, S_IRUGO|S_IWUGO},
+		[SMK_MAPPED] = {
+			"mapped", &smk_mapped_ops, S_IRUGO|S_IWUSR},
+		[SMK_LOAD2] = {
+			"load2", &smk_load2_ops, S_IRUGO|S_IWUSR},
+		[SMK_LOAD_SELF2] = {
+			"load-self2", &smk_load_self2_ops, S_IRUGO|S_IWUGO},
+		[SMK_ACCESS2] = {
+			"access2", &smk_access2_ops, S_IRUGO|S_IWUGO},
+		[SMK_CIPSO2] = {
+			"cipso2", &smk_cipso2_ops, S_IRUGO|S_IWUSR},
+		[SMK_REVOKE_SUBJ] = {
+			"revoke-subject", &smk_revoke_subj_ops,
+			S_IRUGO|S_IWUSR},
+		[SMK_CHANGE_RULE] = {
+			"change-rule", &smk_change_rule_ops, S_IRUGO|S_IWUSR},
+		[SMK_SYSLOG] = {
+			"syslog", &smk_syslog_ops, S_IRUGO|S_IWUSR},
+		[SMK_PTRACE] = {
+			"ptrace", &smk_ptrace_ops, S_IRUGO|S_IWUSR},
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+		[SMK_UNCONFINED] = {
+			"unconfined", &smk_unconfined_ops, S_IRUGO|S_IWUSR},
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+		[SMK_NET6ADDR] = {
+			"ipv6host", &smk_net6addr_ops, S_IRUGO|S_IWUSR},
+#endif /* CONFIG_IPV6 */
+		[SMK_RELABEL_SELF] = {
+			"relabel-self", &smk_relabel_self_ops,
+				S_IRUGO|S_IWUGO},
+		/* last one */
+			{""}
+	};
+
+	rc = simple_fill_super(sb, SMACK_MAGIC, smack_files);
+	if (rc != 0) {
+		printk(KERN_ERR "%s failed %d while creating inodes\n",
+			__func__, rc);
+		return rc;
+	}
+
+	root_inode = d_inode(sb->s_root);
+
+	return 0;
+}
+
+/**
+ * smk_mount - get the smackfs superblock
+ * @fs_type: passed along without comment
+ * @flags: passed along without comment
+ * @dev_name: passed along without comment
+ * @data: passed along without comment
+ *
+ * Just passes everything along.
+ *
+ * Returns what the lower level code does.
+ */
+static struct dentry *smk_mount(struct file_system_type *fs_type,
+		      int flags, const char *dev_name, void *data)
+{
+	return mount_single(fs_type, flags, data, smk_fill_super);
+}
+
+static struct file_system_type smk_fs_type = {
+	.name		= "smackfs",
+	.mount		= smk_mount,
+	.kill_sb	= kill_litter_super,
+};
+
+static struct vfsmount *smackfs_mount;
+
+static int __init smk_preset_netlabel(struct smack_known *skp)
+{
+	skp->smk_netlabel.domain = skp->smk_known;
+	skp->smk_netlabel.flags =
+		NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
+	return smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
+				&skp->smk_netlabel, strlen(skp->smk_known));
+}
+
+/**
+ * init_smk_fs - get the smackfs superblock
+ *
+ * Register the smackfs filesystem.
+ *
+ * Do not register smackfs if Smack wasn't enabled on boot. This
+ * cannot be done from the normal smack_init() code path because the
+ * security subsystem is initialized before the VFS caches.
+ *
+ * Returns 0 if Smack was not chosen on boot, or if it was chosen and
+ * filesystem registration succeeded; a negative error code otherwise.
+ */
+static int __init init_smk_fs(void)
+{
+	int err;
+	int rc;
+
+	if (smack_enabled == 0)
+		return 0;
+
+	err = smk_init_sysfs();
+	if (err)
+		printk(KERN_ERR "smackfs: sysfs mountpoint problem.\n");
+
+	err = register_filesystem(&smk_fs_type);
+	if (!err) {
+		smackfs_mount = kern_mount(&smk_fs_type);
+		if (IS_ERR(smackfs_mount)) {
+			printk(KERN_ERR "smackfs:  could not mount!\n");
+			err = PTR_ERR(smackfs_mount);
+			smackfs_mount = NULL;
+		}
+	}
+
+	smk_cipso_doi();
+	smk_unlbl_ambient(NULL);
+
+	rc = smk_preset_netlabel(&smack_known_floor);
+	if (err == 0 && rc < 0)
+		err = rc;
+	rc = smk_preset_netlabel(&smack_known_hat);
+	if (err == 0 && rc < 0)
+		err = rc;
+	rc = smk_preset_netlabel(&smack_known_huh);
+	if (err == 0 && rc < 0)
+		err = rc;
+	rc = smk_preset_netlabel(&smack_known_invalid);
+	if (err == 0 && rc < 0)
+		err = rc;
+	rc = smk_preset_netlabel(&smack_known_star);
+	if (err == 0 && rc < 0)
+		err = rc;
+	rc = smk_preset_netlabel(&smack_known_web);
+	if (err == 0 && rc < 0)
+		err = rc;
+
+	return err;
+}
+
+__initcall(init_smk_fs);
diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore
new file mode 100644
index 0000000..dc0f220
--- /dev/null
+++ b/security/tomoyo/.gitignore
@@ -0,0 +1,2 @@
+builtin-policy.h
+policy/*.conf
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
new file mode 100644
index 0000000..404dce6
--- /dev/null
+++ b/security/tomoyo/Kconfig
@@ -0,0 +1,76 @@
+config SECURITY_TOMOYO
+	bool "TOMOYO Linux Support"
+	depends on SECURITY
+	depends on NET
+	select SECURITYFS
+	select SECURITY_PATH
+	select SECURITY_NETWORK
+	select SRCU
+	select BUILD_BIN2C
+	default n
+	help
+	  This selects TOMOYO Linux, pathname-based access control.
+	  Required userspace tools and further information may be
+	  found at <http://tomoyo.sourceforge.jp/>.
+	  If you are unsure how to answer this question, answer N.
+
+config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
+	int "Default maximal count for learning mode"
+	default 2048
+	range 0 2147483647
+	depends on SECURITY_TOMOYO
+	help
+	  This is the default limit on the number of ACL entries that are
+	  automatically appended to the policy in "learning mode".
+	  Some programs access thousands of objects, so running such
+	  programs in "learning mode" slows the system down and consumes
+	  a great deal of memory.
+	  This limit is the safeguard against such programs.
+
+config SECURITY_TOMOYO_MAX_AUDIT_LOG
+	int "Default maximal count for audit log"
+	default 1024
+	range 0 2147483647
+	depends on SECURITY_TOMOYO
+	help
+	  This is the default limit on the number of audit log entries
+	  that the kernel can hold in memory.
+	  You can read the log via /sys/kernel/security/tomoyo/audit.
+	  If you don't need audit logs, you may set this value to 0.
+
+config SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+	bool "Activate without calling userspace policy loader."
+	default n
+	depends on SECURITY_TOMOYO
+	---help---
+	  Say Y here if you want to activate access control as soon as
+	  the built-in policy has been loaded. This option is useful for
+	  systems where operations that could hijack the boot sequence
+	  must be performed before the policy is loaded. For example, you
+	  can activate enforcement immediately after loading the fixed
+	  part of the policy, which permits only the operations needed to
+	  mount the partition containing the variant part of the policy,
+	  verify it (e.g. by running a GPG check), and load it. Since
+	  enforcing mode is in effect from the beginning, the window for
+	  hijacking the boot sequence is reduced.
+
+config SECURITY_TOMOYO_POLICY_LOADER
+	string "Location of userspace policy loader"
+	default "/sbin/tomoyo-init"
+	depends on SECURITY_TOMOYO
+	depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+	---help---
+	  This is the default pathname of the policy loader, which is
+	  called before activation. You can override this setting via the
+	  TOMOYO_loader= kernel command line option.
+
+config SECURITY_TOMOYO_ACTIVATION_TRIGGER
+	string "Trigger for calling userspace policy loader"
+	default "/sbin/init"
+	depends on SECURITY_TOMOYO
+	depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+	---help---
+	  This is the default pathname of the activation trigger.
+	  You can override this setting via the TOMOYO_trigger= kernel
+	  command line option. For example, if you boot with the
+	  init=/bin/systemd option, you may also want to pass the
+	  TOMOYO_trigger=/bin/systemd option.
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
new file mode 100644
index 0000000..65dbcb2
--- /dev/null
+++ b/security/tomoyo/Makefile
@@ -0,0 +1,15 @@
+obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
+
+targets += builtin-policy.h
+define do_policy
+echo "static char tomoyo_builtin_$(1)[] __initdata ="; \
+$(objtree)/scripts/basic/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \
+echo ";"
+endef
+quiet_cmd_policy  = POLICY  $@
+      cmd_policy  = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
+
+$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
+	$(call if_changed,policy)
+
+$(obj)/common.o: $(obj)/builtin-policy.h
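+
+# Illustrative shape of the generated header (editor's sketch): for each
+# policy name, builtin-policy.h gains a fragment like
+#	static char tomoyo_builtin_profile[] __initdata =
+#	"...bin2c-encoded policy/profile.conf or its .conf.default..."
+#	;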
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
new file mode 100644
index 0000000..3ffa4f5
--- /dev/null
+++ b/security/tomoyo/audit.c
@@ -0,0 +1,468 @@
+/*
+ * security/tomoyo/audit.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/**
+ * tomoyo_print_bprm - Print "struct linux_binprm" for auditing.
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ * @dump: Pointer to "struct tomoyo_page_dump".
+ *
+ * Returns the contents of @bprm on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+static char *tomoyo_print_bprm(struct linux_binprm *bprm,
+			       struct tomoyo_page_dump *dump)
+{
+	static const int tomoyo_buffer_len = 4096 * 2;
+	char *buffer = kzalloc(tomoyo_buffer_len, GFP_NOFS);
+	char *cp;
+	char *last_start;
+	int len;
+	unsigned long pos = bprm->p;
+	int offset = pos % PAGE_SIZE;
+	int argv_count = bprm->argc;
+	int envp_count = bprm->envc;
+	bool truncated = false;
+	if (!buffer)
+		return NULL;
+	len = snprintf(buffer, tomoyo_buffer_len - 1, "argv[]={ ");
+	cp = buffer + len;
+	if (!argv_count) {
+		memmove(cp, "} envp[]={ ", 11);
+		cp += 11;
+	}
+	last_start = cp;
+	while (argv_count || envp_count) {
+		if (!tomoyo_dump_page(bprm, pos, dump))
+			goto out;
+		pos += PAGE_SIZE - offset;
+		/* Read. */
+		while (offset < PAGE_SIZE) {
+			const char *kaddr = dump->data;
+			const unsigned char c = kaddr[offset++];
+			if (cp == last_start)
+				*cp++ = '"';
+			if (cp >= buffer + tomoyo_buffer_len - 32) {
+				/* Reserve some room for "..." string. */
+				truncated = true;
+			} else if (c == '\\') {
+				*cp++ = '\\';
+				*cp++ = '\\';
+			} else if (c > ' ' && c < 127) {
+				*cp++ = c;
+			} else if (!c) {
+				*cp++ = '"';
+				*cp++ = ' ';
+				last_start = cp;
+			} else {
+				*cp++ = '\\';
+				*cp++ = (c >> 6) + '0';
+				*cp++ = ((c >> 3) & 7) + '0';
+				*cp++ = (c & 7) + '0';
+			}
+			if (c)
+				continue;
+			if (argv_count) {
+				if (--argv_count == 0) {
+					if (truncated) {
+						cp = last_start;
+						memmove(cp, "... ", 4);
+						cp += 4;
+					}
+					memmove(cp, "} envp[]={ ", 11);
+					cp += 11;
+					last_start = cp;
+					truncated = false;
+				}
+			} else if (envp_count) {
+				if (--envp_count == 0) {
+					if (truncated) {
+						cp = last_start;
+						memmove(cp, "... ", 4);
+						cp += 4;
+					}
+				}
+			}
+			if (!argv_count && !envp_count)
+				break;
+		}
+		offset = 0;
+	}
+	*cp++ = '}';
+	*cp = '\0';
+	return buffer;
+out:
+	snprintf(buffer, tomoyo_buffer_len - 1,
+		 "argv[]={ ... } envp[]={ ... }");
+	return buffer;
+}
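+
+/*
+ * Illustrative output, an editor's sketch (the command line is
+ * hypothetical):
+ *
+ *	argv[]={ "/bin/ls" "-l" } envp[]={ "PATH=/sbin:/bin" }
+ *
+ * Bytes outside the printable range are emitted as three-digit octal
+ * escapes, and an overlong argv[] or envp[] block is replaced by "...".
+ */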
+
+/**
+ * tomoyo_filetype - Get string representation of file type.
+ *
+ * @mode: Mode value for stat().
+ *
+ * Returns file type string.
+ */
+static inline const char *tomoyo_filetype(const umode_t mode)
+{
+	switch (mode & S_IFMT) {
+	case S_IFREG:
+	case 0:
+		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FILE];
+	case S_IFDIR:
+		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_DIRECTORY];
+	case S_IFLNK:
+		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SYMLINK];
+	case S_IFIFO:
+		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FIFO];
+	case S_IFSOCK:
+		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SOCKET];
+	case S_IFBLK:
+		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_BLOCK_DEV];
+	case S_IFCHR:
+		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_CHAR_DEV];
+	}
+	return "unknown"; /* This should not happen. */
+}
+
+/**
+ * tomoyo_print_header - Get header line of audit log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns string representation.
+ *
+ * This function uses kmalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+static char *tomoyo_print_header(struct tomoyo_request_info *r)
+{
+	struct tomoyo_time stamp;
+	const pid_t gpid = task_pid_nr(current);
+	struct tomoyo_obj_info *obj = r->obj;
+	static const int tomoyo_buffer_len = 4096;
+	char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+	int pos;
+	u8 i;
+	if (!buffer)
+		return NULL;
+
+	tomoyo_convert_time(get_seconds(), &stamp);
+
+	pos = snprintf(buffer, tomoyo_buffer_len - 1,
+		       "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s "
+		       "granted=%s (global-pid=%u) task={ pid=%u ppid=%u "
+		       "uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u "
+		       "fsuid=%u fsgid=%u }", stamp.year, stamp.month,
+		       stamp.day, stamp.hour, stamp.min, stamp.sec, r->profile,
+		       tomoyo_mode[r->mode], tomoyo_yesno(r->granted), gpid,
+		       tomoyo_sys_getpid(), tomoyo_sys_getppid(),
+		       from_kuid(&init_user_ns, current_uid()),
+		       from_kgid(&init_user_ns, current_gid()),
+		       from_kuid(&init_user_ns, current_euid()),
+		       from_kgid(&init_user_ns, current_egid()),
+		       from_kuid(&init_user_ns, current_suid()),
+		       from_kgid(&init_user_ns, current_sgid()),
+		       from_kuid(&init_user_ns, current_fsuid()),
+		       from_kgid(&init_user_ns, current_fsgid()));
+	if (!obj)
+		goto no_obj_info;
+	if (!obj->validate_done) {
+		tomoyo_get_attributes(obj);
+		obj->validate_done = true;
+	}
+	for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
+		struct tomoyo_mini_stat *stat;
+		unsigned int dev;
+		umode_t mode;
+		if (!obj->stat_valid[i])
+			continue;
+		stat = &obj->stat[i];
+		dev = stat->dev;
+		mode = stat->mode;
+		if (i & 1) {
+			pos += snprintf(buffer + pos,
+					tomoyo_buffer_len - 1 - pos,
+					" path%u.parent={ uid=%u gid=%u "
+					"ino=%lu perm=0%o }", (i >> 1) + 1,
+					from_kuid(&init_user_ns, stat->uid),
+					from_kgid(&init_user_ns, stat->gid),
+					(unsigned long)stat->ino,
+					stat->mode & S_IALLUGO);
+			continue;
+		}
+		pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
+				" path%u={ uid=%u gid=%u ino=%lu major=%u"
+				" minor=%u perm=0%o type=%s", (i >> 1) + 1,
+				from_kuid(&init_user_ns, stat->uid),
+				from_kgid(&init_user_ns, stat->gid),
+				(unsigned long)stat->ino,
+				MAJOR(dev), MINOR(dev),
+				mode & S_IALLUGO, tomoyo_filetype(mode));
+		if (S_ISCHR(mode) || S_ISBLK(mode)) {
+			dev = stat->rdev;
+			pos += snprintf(buffer + pos,
+					tomoyo_buffer_len - 1 - pos,
+					" dev_major=%u dev_minor=%u",
+					MAJOR(dev), MINOR(dev));
+		}
+		pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
+				" }");
+	}
+no_obj_info:
+	if (pos < tomoyo_buffer_len - 1)
+		return buffer;
+	kfree(buffer);
+	return NULL;
+}
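+
+/*
+ * Illustrative header line, an editor's sketch (all values are
+ * hypothetical):
+ *
+ *	#2011/09/03 12:34:56# profile=1 mode=learning granted=no
+ *	(global-pid=123) task={ pid=123 ppid=1 uid=0 gid=0 euid=0
+ *	egid=0 suid=0 sgid=0 fsuid=0 fsgid=0 } path1={ uid=0 gid=0
+ *	ino=2 major=8 minor=1 perm=0755 type=file }
+ */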
+
+/**
+ * tomoyo_init_log - Allocate buffer for audit logs.
+ *
+ * @r:    Pointer to "struct tomoyo_request_info".
+ * @len:  Buffer size needed for @fmt and @args.
+ * @fmt:  The printf()'s format string.
+ * @args: va_list structure for @fmt.
+ *
+ * Returns pointer to allocated memory.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
+		      va_list args)
+{
+	char *buf = NULL;
+	char *bprm_info = NULL;
+	const char *header = NULL;
+	char *realpath = NULL;
+	const char *symlink = NULL;
+	int pos;
+	const char *domainname = r->domain->domainname->name;
+	header = tomoyo_print_header(r);
+	if (!header)
+		return NULL;
+	/* +10 is for '\n' etc. and '\0'. */
+	len += strlen(domainname) + strlen(header) + 10;
+	if (r->ee) {
+		struct file *file = r->ee->bprm->file;
+		realpath = tomoyo_realpath_from_path(&file->f_path);
+		bprm_info = tomoyo_print_bprm(r->ee->bprm, &r->ee->dump);
+		if (!realpath || !bprm_info)
+			goto out;
+		/* +80 is for " exec={ realpath=\"%s\" argc=%d envc=%d %s }" */
+		len += strlen(realpath) + 80 + strlen(bprm_info);
+	} else if (r->obj && r->obj->symlink_target) {
+		symlink = r->obj->symlink_target->name;
+		/* +18 is for " symlink.target=\"%s\"" */
+		len += 18 + strlen(symlink);
+	}
+	len = tomoyo_round2(len);
+	buf = kzalloc(len, GFP_NOFS);
+	if (!buf)
+		goto out;
+	len--;
+	pos = snprintf(buf, len, "%s", header);
+	if (realpath) {
+		struct linux_binprm *bprm = r->ee->bprm;
+		pos += snprintf(buf + pos, len - pos,
+				" exec={ realpath=\"%s\" argc=%d envc=%d %s }",
+				realpath, bprm->argc, bprm->envc, bprm_info);
+	} else if (symlink)
+		pos += snprintf(buf + pos, len - pos, " symlink.target=\"%s\"",
+				symlink);
+	pos += snprintf(buf + pos, len - pos, "\n%s\n", domainname);
+	vsnprintf(buf + pos, len - pos, fmt, args);
+out:
+	kfree(realpath);
+	kfree(bprm_info);
+	kfree(header);
+	return buf;
+}
+
+/* Wait queue for /sys/kernel/security/tomoyo/audit. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_log_wait);
+
+/* Structure for audit log. */
+struct tomoyo_log {
+	struct list_head list;
+	char *log;
+	int size;
+};
+
+/* The list for "struct tomoyo_log". */
+static LIST_HEAD(tomoyo_log);
+
+/* Lock for "struct list_head tomoyo_log". */
+static DEFINE_SPINLOCK(tomoyo_log_lock);
+
+/* Length of "struct list_head tomoyo_log". */
+static unsigned int tomoyo_log_count;
+
+/**
+ * tomoyo_get_audit - Get audit mode.
+ *
+ * @ns:          Pointer to "struct tomoyo_policy_namespace".
+ * @profile:     Profile number.
+ * @index:       Index number of functionality.
+ * @matched_acl: Pointer to "struct tomoyo_acl_info". May be NULL.
+ * @is_granted:  True if granted log, false otherwise.
+ *
+ * Returns true if this request should be audited, false otherwise.
+ */
+static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
+			     const u8 profile, const u8 index,
+			     const struct tomoyo_acl_info *matched_acl,
+			     const bool is_granted)
+{
+	u8 mode;
+	const u8 category = tomoyo_index2category[index] +
+		TOMOYO_MAX_MAC_INDEX;
+	struct tomoyo_profile *p;
+	if (!tomoyo_policy_loaded)
+		return false;
+	p = tomoyo_profile(ns, profile);
+	if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG])
+		return false;
+	if (is_granted && matched_acl && matched_acl->cond &&
+	    matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+		return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES;
+	mode = p->config[index];
+	if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+		mode = p->config[category];
+	if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+		mode = p->default_config;
+	if (is_granted)
+		return mode & TOMOYO_CONFIG_WANT_GRANT_LOG;
+	return mode & TOMOYO_CONFIG_WANT_REJECT_LOG;
+}
+
+/**
+ * tomoyo_write_log2 - Write an audit log.
+ *
+ * @r:    Pointer to "struct tomoyo_request_info".
+ * @len:  Buffer size needed for @fmt and @args.
+ * @fmt:  The printf()'s format string.
+ * @args: va_list structure for @fmt.
+ *
+ * Returns nothing.
+ */
+void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
+		       va_list args)
+{
+	char *buf;
+	struct tomoyo_log *entry;
+	bool quota_exceeded = false;
+	if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type,
+			      r->matched_acl, r->granted))
+		goto out;
+	buf = tomoyo_init_log(r, len, fmt, args);
+	if (!buf)
+		goto out;
+	entry = kzalloc(sizeof(*entry), GFP_NOFS);
+	if (!entry) {
+		kfree(buf);
+		goto out;
+	}
+	entry->log = buf;
+	len = tomoyo_round2(strlen(buf) + 1);
+	/*
+	 * The entry->size is used for memory quota checks.
+	 * Don't go beyond strlen(entry->log).
+	 */
+	entry->size = len + tomoyo_round2(sizeof(*entry));
+	spin_lock(&tomoyo_log_lock);
+	if (tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT] &&
+	    tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] + entry->size >=
+	    tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT]) {
+		quota_exceeded = true;
+	} else {
+		tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] += entry->size;
+		list_add_tail(&entry->list, &tomoyo_log);
+		tomoyo_log_count++;
+	}
+	spin_unlock(&tomoyo_log_lock);
+	if (quota_exceeded) {
+		kfree(buf);
+		kfree(entry);
+		goto out;
+	}
+	wake_up(&tomoyo_log_wait);
+out:
+	return;
+}
+
+/**
+ * tomoyo_write_log - Write an audit log.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @fmt: The printf()'s format string, followed by parameters.
+ *
+ * Returns nothing.
+ */
+void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
+{
+	va_list args;
+	int len;
+	va_start(args, fmt);
+	len = vsnprintf((char *) &len, 1, fmt, args) + 1;
+	va_end(args);
+	va_start(args, fmt);
+	tomoyo_write_log2(r, len, fmt, args);
+	va_end(args);
+}
+
+/**
+ * tomoyo_read_log - Read an audit log.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+void tomoyo_read_log(struct tomoyo_io_buffer *head)
+{
+	struct tomoyo_log *ptr = NULL;
+	if (head->r.w_pos)
+		return;
+	kfree(head->read_buf);
+	head->read_buf = NULL;
+	spin_lock(&tomoyo_log_lock);
+	if (!list_empty(&tomoyo_log)) {
+		ptr = list_entry(tomoyo_log.next, typeof(*ptr), list);
+		list_del(&ptr->list);
+		tomoyo_log_count--;
+		tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] -= ptr->size;
+	}
+	spin_unlock(&tomoyo_log_lock);
+	if (ptr) {
+		head->read_buf = ptr->log;
+		head->r.w[head->r.w_pos++] = head->read_buf;
+		kfree(ptr);
+	}
+}
+
+/**
+ * tomoyo_poll_log - Wait for an audit log.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". May be NULL.
+ *
+ * Returns POLLIN | POLLRDNORM when ready to read an audit log.
+ */
+unsigned int tomoyo_poll_log(struct file *file, poll_table *wait)
+{
+	if (tomoyo_log_count)
+		return POLLIN | POLLRDNORM;
+	poll_wait(file, &tomoyo_log_wait, wait);
+	if (tomoyo_log_count)
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
new file mode 100644
index 0000000..e0fb750
--- /dev/null
+++ b/security/tomoyo/common.c
@@ -0,0 +1,2789 @@
+/*
+ * security/tomoyo/common.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include "common.h"
+
+/* String table for operation mode. */
+const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = {
+	[TOMOYO_CONFIG_DISABLED]   = "disabled",
+	[TOMOYO_CONFIG_LEARNING]   = "learning",
+	[TOMOYO_CONFIG_PERMISSIVE] = "permissive",
+	[TOMOYO_CONFIG_ENFORCING]  = "enforcing"
+};
+
+/* String table for /sys/kernel/security/tomoyo/profile */
+const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+				       + TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+	/* CONFIG::file group */
+	[TOMOYO_MAC_FILE_EXECUTE]    = "execute",
+	[TOMOYO_MAC_FILE_OPEN]       = "open",
+	[TOMOYO_MAC_FILE_CREATE]     = "create",
+	[TOMOYO_MAC_FILE_UNLINK]     = "unlink",
+	[TOMOYO_MAC_FILE_GETATTR]    = "getattr",
+	[TOMOYO_MAC_FILE_MKDIR]      = "mkdir",
+	[TOMOYO_MAC_FILE_RMDIR]      = "rmdir",
+	[TOMOYO_MAC_FILE_MKFIFO]     = "mkfifo",
+	[TOMOYO_MAC_FILE_MKSOCK]     = "mksock",
+	[TOMOYO_MAC_FILE_TRUNCATE]   = "truncate",
+	[TOMOYO_MAC_FILE_SYMLINK]    = "symlink",
+	[TOMOYO_MAC_FILE_MKBLOCK]    = "mkblock",
+	[TOMOYO_MAC_FILE_MKCHAR]     = "mkchar",
+	[TOMOYO_MAC_FILE_LINK]       = "link",
+	[TOMOYO_MAC_FILE_RENAME]     = "rename",
+	[TOMOYO_MAC_FILE_CHMOD]      = "chmod",
+	[TOMOYO_MAC_FILE_CHOWN]      = "chown",
+	[TOMOYO_MAC_FILE_CHGRP]      = "chgrp",
+	[TOMOYO_MAC_FILE_IOCTL]      = "ioctl",
+	[TOMOYO_MAC_FILE_CHROOT]     = "chroot",
+	[TOMOYO_MAC_FILE_MOUNT]      = "mount",
+	[TOMOYO_MAC_FILE_UMOUNT]     = "unmount",
+	[TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root",
+	/* CONFIG::network group */
+	[TOMOYO_MAC_NETWORK_INET_STREAM_BIND]       = "inet_stream_bind",
+	[TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN]     = "inet_stream_listen",
+	[TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT]    = "inet_stream_connect",
+	[TOMOYO_MAC_NETWORK_INET_DGRAM_BIND]        = "inet_dgram_bind",
+	[TOMOYO_MAC_NETWORK_INET_DGRAM_SEND]        = "inet_dgram_send",
+	[TOMOYO_MAC_NETWORK_INET_RAW_BIND]          = "inet_raw_bind",
+	[TOMOYO_MAC_NETWORK_INET_RAW_SEND]          = "inet_raw_send",
+	[TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND]       = "unix_stream_bind",
+	[TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN]     = "unix_stream_listen",
+	[TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT]    = "unix_stream_connect",
+	[TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND]        = "unix_dgram_bind",
+	[TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND]        = "unix_dgram_send",
+	[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND]    = "unix_seqpacket_bind",
+	[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN]  = "unix_seqpacket_listen",
+	[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect",
+	/* CONFIG::misc group */
+	[TOMOYO_MAC_ENVIRON] = "env",
+	/* CONFIG group */
+	[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file",
+	[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+	[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc",
+};
+
+/* String table for conditions. */
+const char * const tomoyo_condition_keyword[TOMOYO_MAX_CONDITION_KEYWORD] = {
+	[TOMOYO_TASK_UID]             = "task.uid",
+	[TOMOYO_TASK_EUID]            = "task.euid",
+	[TOMOYO_TASK_SUID]            = "task.suid",
+	[TOMOYO_TASK_FSUID]           = "task.fsuid",
+	[TOMOYO_TASK_GID]             = "task.gid",
+	[TOMOYO_TASK_EGID]            = "task.egid",
+	[TOMOYO_TASK_SGID]            = "task.sgid",
+	[TOMOYO_TASK_FSGID]           = "task.fsgid",
+	[TOMOYO_TASK_PID]             = "task.pid",
+	[TOMOYO_TASK_PPID]            = "task.ppid",
+	[TOMOYO_EXEC_ARGC]            = "exec.argc",
+	[TOMOYO_EXEC_ENVC]            = "exec.envc",
+	[TOMOYO_TYPE_IS_SOCKET]       = "socket",
+	[TOMOYO_TYPE_IS_SYMLINK]      = "symlink",
+	[TOMOYO_TYPE_IS_FILE]         = "file",
+	[TOMOYO_TYPE_IS_BLOCK_DEV]    = "block",
+	[TOMOYO_TYPE_IS_DIRECTORY]    = "directory",
+	[TOMOYO_TYPE_IS_CHAR_DEV]     = "char",
+	[TOMOYO_TYPE_IS_FIFO]         = "fifo",
+	[TOMOYO_MODE_SETUID]          = "setuid",
+	[TOMOYO_MODE_SETGID]          = "setgid",
+	[TOMOYO_MODE_STICKY]          = "sticky",
+	[TOMOYO_MODE_OWNER_READ]      = "owner_read",
+	[TOMOYO_MODE_OWNER_WRITE]     = "owner_write",
+	[TOMOYO_MODE_OWNER_EXECUTE]   = "owner_execute",
+	[TOMOYO_MODE_GROUP_READ]      = "group_read",
+	[TOMOYO_MODE_GROUP_WRITE]     = "group_write",
+	[TOMOYO_MODE_GROUP_EXECUTE]   = "group_execute",
+	[TOMOYO_MODE_OTHERS_READ]     = "others_read",
+	[TOMOYO_MODE_OTHERS_WRITE]    = "others_write",
+	[TOMOYO_MODE_OTHERS_EXECUTE]  = "others_execute",
+	[TOMOYO_EXEC_REALPATH]        = "exec.realpath",
+	[TOMOYO_SYMLINK_TARGET]       = "symlink.target",
+	[TOMOYO_PATH1_UID]            = "path1.uid",
+	[TOMOYO_PATH1_GID]            = "path1.gid",
+	[TOMOYO_PATH1_INO]            = "path1.ino",
+	[TOMOYO_PATH1_MAJOR]          = "path1.major",
+	[TOMOYO_PATH1_MINOR]          = "path1.minor",
+	[TOMOYO_PATH1_PERM]           = "path1.perm",
+	[TOMOYO_PATH1_TYPE]           = "path1.type",
+	[TOMOYO_PATH1_DEV_MAJOR]      = "path1.dev_major",
+	[TOMOYO_PATH1_DEV_MINOR]      = "path1.dev_minor",
+	[TOMOYO_PATH2_UID]            = "path2.uid",
+	[TOMOYO_PATH2_GID]            = "path2.gid",
+	[TOMOYO_PATH2_INO]            = "path2.ino",
+	[TOMOYO_PATH2_MAJOR]          = "path2.major",
+	[TOMOYO_PATH2_MINOR]          = "path2.minor",
+	[TOMOYO_PATH2_PERM]           = "path2.perm",
+	[TOMOYO_PATH2_TYPE]           = "path2.type",
+	[TOMOYO_PATH2_DEV_MAJOR]      = "path2.dev_major",
+	[TOMOYO_PATH2_DEV_MINOR]      = "path2.dev_minor",
+	[TOMOYO_PATH1_PARENT_UID]     = "path1.parent.uid",
+	[TOMOYO_PATH1_PARENT_GID]     = "path1.parent.gid",
+	[TOMOYO_PATH1_PARENT_INO]     = "path1.parent.ino",
+	[TOMOYO_PATH1_PARENT_PERM]    = "path1.parent.perm",
+	[TOMOYO_PATH2_PARENT_UID]     = "path2.parent.uid",
+	[TOMOYO_PATH2_PARENT_GID]     = "path2.parent.gid",
+	[TOMOYO_PATH2_PARENT_INO]     = "path2.parent.ino",
+	[TOMOYO_PATH2_PARENT_PERM]    = "path2.parent.perm",
+};
+
+/* String table for PREFERENCE keyword. */
+static const char * const tomoyo_pref_keywords[TOMOYO_MAX_PREF] = {
+	[TOMOYO_PREF_MAX_AUDIT_LOG]      = "max_audit_log",
+	[TOMOYO_PREF_MAX_LEARNING_ENTRY] = "max_learning_entry",
+};
+
+/* String table for path operation. */
+const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
+	[TOMOYO_TYPE_EXECUTE]    = "execute",
+	[TOMOYO_TYPE_READ]       = "read",
+	[TOMOYO_TYPE_WRITE]      = "write",
+	[TOMOYO_TYPE_APPEND]     = "append",
+	[TOMOYO_TYPE_UNLINK]     = "unlink",
+	[TOMOYO_TYPE_GETATTR]    = "getattr",
+	[TOMOYO_TYPE_RMDIR]      = "rmdir",
+	[TOMOYO_TYPE_TRUNCATE]   = "truncate",
+	[TOMOYO_TYPE_SYMLINK]    = "symlink",
+	[TOMOYO_TYPE_CHROOT]     = "chroot",
+	[TOMOYO_TYPE_UMOUNT]     = "unmount",
+};
+
+/* String table for socket's operation. */
+const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = {
+	[TOMOYO_NETWORK_BIND]    = "bind",
+	[TOMOYO_NETWORK_LISTEN]  = "listen",
+	[TOMOYO_NETWORK_CONNECT] = "connect",
+	[TOMOYO_NETWORK_SEND]    = "send",
+};
+
+/* String table for categories. */
+static const char * const tomoyo_category_keywords
+[TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+	[TOMOYO_MAC_CATEGORY_FILE]    = "file",
+	[TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+	[TOMOYO_MAC_CATEGORY_MISC]    = "misc",
+};
+
+/* Permit policy management by non-root user? */
+static bool tomoyo_manage_by_non_root;
+
+/* Utility functions. */
+
+/**
+ * tomoyo_yesno - Return "yes" or "no".
+ *
+ * @value: Bool value.
+ */
+const char *tomoyo_yesno(const unsigned int value)
+{
+	return value ? "yes" : "no";
+}
+
+/**
+ * tomoyo_addprintf - snprintf() that appends to the buffer like strncat().
+ *
+ * @buffer: Buffer to write to. Must be '\0'-terminated.
+ * @len:    Size of @buffer.
+ * @fmt:    The printf()'s format string, followed by parameters.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_addprintf(char *buffer, int len, const char *fmt, ...)
+{
+	va_list args;
+	const int pos = strlen(buffer);
+	va_start(args, fmt);
+	vsnprintf(buffer + pos, len - pos - 1, fmt, args);
+	va_end(args);
+}
+
+/**
+ * tomoyo_flush - Flush queued string to userspace's buffer.
+ *
+ * @head:   Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns true if all data was flushed, false otherwise.
+ */
+static bool tomoyo_flush(struct tomoyo_io_buffer *head)
+{
+	while (head->r.w_pos) {
+		const char *w = head->r.w[0];
+		size_t len = strlen(w);
+		if (len) {
+			if (len > head->read_user_buf_avail)
+				len = head->read_user_buf_avail;
+			if (!len)
+				return false;
+			if (copy_to_user(head->read_user_buf, w, len))
+				return false;
+			head->read_user_buf_avail -= len;
+			head->read_user_buf += len;
+			w += len;
+		}
+		head->r.w[0] = w;
+		if (*w)
+			return false;
+		/* Add '\0' for audit logs and query. */
+		if (head->poll) {
+			if (!head->read_user_buf_avail ||
+			    copy_to_user(head->read_user_buf, "", 1))
+				return false;
+			head->read_user_buf_avail--;
+			head->read_user_buf++;
+		}
+		head->r.w_pos--;
+		for (len = 0; len < head->r.w_pos; len++)
+			head->r.w[len] = head->r.w[len + 1];
+	}
+	head->r.avail = 0;
+	return true;
+}
+
+/**
+ * tomoyo_set_string - Queue string to "struct tomoyo_io_buffer" structure.
+ *
+ * @head:   Pointer to "struct tomoyo_io_buffer".
+ * @string: String to print.
+ *
+ * Note that @string has to be kept valid until @head is kfree()d.
+ * This means that char[] allocated on stack memory cannot be passed to
+ * this function. Use tomoyo_io_printf() for char[] allocated on stack memory.
+ */
+static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string)
+{
+	if (head->r.w_pos < TOMOYO_MAX_IO_READ_QUEUE) {
+		head->r.w[head->r.w_pos++] = string;
+		tomoyo_flush(head);
+	} else
+		WARN_ON(1);
+}
+
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+			     ...) __printf(2, 3);
+
+/**
+ * tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @fmt:  The printf()'s format string, followed by parameters.
+ */
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+			     ...)
+{
+	va_list args;
+	size_t len;
+	size_t pos = head->r.avail;
+	int size = head->readbuf_size - pos;
+	if (size <= 0)
+		return;
+	va_start(args, fmt);
+	len = vsnprintf(head->read_buf + pos, size, fmt, args) + 1;
+	va_end(args);
+	if (pos + len >= head->readbuf_size) {
+		WARN_ON(1);
+		return;
+	}
+	head->r.avail += len;
+	tomoyo_set_string(head, head->read_buf + pos);
+}
+
+/**
+ * tomoyo_set_space - Put a space to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_set_space(struct tomoyo_io_buffer *head)
+{
+	tomoyo_set_string(head, " ");
+}
+
+/**
+ * tomoyo_set_lf - Put a line feed to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns true if the line feed was fully flushed to the userspace
+ * buffer, false otherwise.
+ */
+static bool tomoyo_set_lf(struct tomoyo_io_buffer *head)
+{
+	tomoyo_set_string(head, "\n");
+	return !head->r.w_pos;
+}
+
+/**
+ * tomoyo_set_slash - Put a slash to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_set_slash(struct tomoyo_io_buffer *head)
+{
+	tomoyo_set_string(head, "/");
+}
+
+/* List of namespaces. */
+LIST_HEAD(tomoyo_namespace_list);
+/* True if namespace other than tomoyo_kernel_namespace is defined. */
+static bool tomoyo_namespace_enabled;
+
+/**
+ * tomoyo_init_policy_namespace - Initialize namespace.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ *
+ * Returns nothing.
+ */
+void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns)
+{
+	unsigned int idx;
+	for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++)
+		INIT_LIST_HEAD(&ns->acl_group[idx]);
+	for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++)
+		INIT_LIST_HEAD(&ns->group_list[idx]);
+	for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++)
+		INIT_LIST_HEAD(&ns->policy_list[idx]);
+	ns->profile_version = 20110903;
+	tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list);
+	list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list);
+}
+
+/**
+ * tomoyo_print_namespace - Print namespace header.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_namespace(struct tomoyo_io_buffer *head)
+{
+	if (!tomoyo_namespace_enabled)
+		return;
+	tomoyo_set_string(head,
+			  container_of(head->r.ns,
+				       struct tomoyo_policy_namespace,
+				       namespace_list)->name);
+	tomoyo_set_space(head);
+}
+
+/**
+ * tomoyo_print_name_union - Print a tomoyo_name_union.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr:  Pointer to "struct tomoyo_name_union".
+ */
+static void tomoyo_print_name_union(struct tomoyo_io_buffer *head,
+				    const struct tomoyo_name_union *ptr)
+{
+	tomoyo_set_space(head);
+	if (ptr->group) {
+		tomoyo_set_string(head, "@");
+		tomoyo_set_string(head, ptr->group->group_name->name);
+	} else {
+		tomoyo_set_string(head, ptr->filename->name);
+	}
+}
+
+/**
+ * tomoyo_print_name_union_quoted - Print a tomoyo_name_union with a quote.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr:  Pointer to "struct tomoyo_name_union".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_name_union_quoted(struct tomoyo_io_buffer *head,
+					   const struct tomoyo_name_union *ptr)
+{
+	if (ptr->group) {
+		tomoyo_set_string(head, "@");
+		tomoyo_set_string(head, ptr->group->group_name->name);
+	} else {
+		tomoyo_set_string(head, "\"");
+		tomoyo_set_string(head, ptr->filename->name);
+		tomoyo_set_string(head, "\"");
+	}
+}
+
+/**
+ * tomoyo_print_number_union_nospace - Print a tomoyo_number_union without a space.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr:  Pointer to "struct tomoyo_number_union".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_number_union_nospace
+(struct tomoyo_io_buffer *head, const struct tomoyo_number_union *ptr)
+{
+	if (ptr->group) {
+		tomoyo_set_string(head, "@");
+		tomoyo_set_string(head, ptr->group->group_name->name);
+	} else {
+		int i;
+		unsigned long min = ptr->values[0];
+		const unsigned long max = ptr->values[1];
+		u8 min_type = ptr->value_type[0];
+		const u8 max_type = ptr->value_type[1];
+		char buffer[128];
+		buffer[0] = '\0';
+		for (i = 0; i < 2; i++) {
+			switch (min_type) {
+			case TOMOYO_VALUE_TYPE_HEXADECIMAL:
+				tomoyo_addprintf(buffer, sizeof(buffer),
+						 "0x%lX", min);
+				break;
+			case TOMOYO_VALUE_TYPE_OCTAL:
+				tomoyo_addprintf(buffer, sizeof(buffer),
+						 "0%lo", min);
+				break;
+			default:
+				tomoyo_addprintf(buffer, sizeof(buffer), "%lu",
+						 min);
+				break;
+			}
+			if (min == max && min_type == max_type)
+				break;
+			tomoyo_addprintf(buffer, sizeof(buffer), "-");
+			min_type = max_type;
+			min = max;
+		}
+		tomoyo_io_printf(head, "%s", buffer);
+	}
+}
+
+/**
+ * tomoyo_print_number_union - Print a tomoyo_number_union.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr:  Pointer to "struct tomoyo_number_union".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_number_union(struct tomoyo_io_buffer *head,
+				      const struct tomoyo_number_union *ptr)
+{
+	tomoyo_set_space(head);
+	tomoyo_print_number_union_nospace(head, ptr);
+}
+
+/**
+ * tomoyo_assign_profile - Create a new profile.
+ *
+ * @ns:      Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number to create.
+ *
+ * Returns pointer to "struct tomoyo_profile" on success, NULL otherwise.
+ */
+static struct tomoyo_profile *tomoyo_assign_profile
+(struct tomoyo_policy_namespace *ns, const unsigned int profile)
+{
+	struct tomoyo_profile *ptr;
+	struct tomoyo_profile *entry;
+	if (profile >= TOMOYO_MAX_PROFILES)
+		return NULL;
+	ptr = ns->profile_ptr[profile];
+	if (ptr)
+		return ptr;
+	entry = kzalloc(sizeof(*entry), GFP_NOFS);
+	if (mutex_lock_interruptible(&tomoyo_policy_lock))
+		goto out;
+	ptr = ns->profile_ptr[profile];
+	if (!ptr && tomoyo_memory_ok(entry)) {
+		ptr = entry;
+		ptr->default_config = TOMOYO_CONFIG_DISABLED |
+			TOMOYO_CONFIG_WANT_GRANT_LOG |
+			TOMOYO_CONFIG_WANT_REJECT_LOG;
+		memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT,
+		       sizeof(ptr->config));
+		ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] =
+			CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG;
+		ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] =
+			CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY;
+		mb(); /* Avoid out-of-order execution. */
+		ns->profile_ptr[profile] = ptr;
+		entry = NULL;
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+ out:
+	kfree(entry);
+	return ptr;
+}
+
+/**
+ * tomoyo_profile - Find a profile.
+ *
+ * @ns:      Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number to find.
+ *
+ * Returns pointer to "struct tomoyo_profile".
+ */
+struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
+				      const u8 profile)
+{
+	static struct tomoyo_profile tomoyo_null_profile;
+	struct tomoyo_profile *ptr = ns->profile_ptr[profile];
+	if (!ptr)
+		ptr = &tomoyo_null_profile;
+	return ptr;
+}
+
+/**
+ * tomoyo_find_yesno - Find values for specified keyword.
+ *
+ * @string: String to check.
+ * @find:   Name of keyword.
+ *
+ * Returns 1 if "@find=yes" was found, 0 if "@find=no" was found, -1 otherwise.
+ */
+static s8 tomoyo_find_yesno(const char *string, const char *find)
+{
+	const char *cp = strstr(string, find);
+	if (cp) {
+		cp += strlen(find);
+		if (!strncmp(cp, "=yes", 4))
+			return 1;
+		else if (!strncmp(cp, "=no", 3))
+			return 0;
+	}
+	return -1;
+}
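+
+/*
+ * For example (editor's illustration): given the string
+ * "mode=enforcing grant_log=no", tomoyo_find_yesno(string, "grant_log")
+ * returns 0, while tomoyo_find_yesno(string, "reject_log") returns -1
+ * because the keyword is absent.
+ */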
+
+/**
+ * tomoyo_set_uint - Set value for specified preference.
+ *
+ * @i:      Pointer to "unsigned int".
+ * @string: String to check.
+ * @find:   Name of keyword.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_set_uint(unsigned int *i, const char *string,
+			    const char *find)
+{
+	const char *cp = strstr(string, find);
+	if (cp)
+		sscanf(cp + strlen(find), "=%u", i);
+}
+
+/**
+ * tomoyo_set_mode - Set mode for specified profile.
+ *
+ * @name:    Name of functionality.
+ * @value:   Mode for @name.
+ * @profile: Pointer to "struct tomoyo_profile".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_set_mode(char *name, const char *value,
+			   struct tomoyo_profile *profile)
+{
+	u8 i;
+	u8 config;
+	if (!strcmp(name, "CONFIG")) {
+		i = TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX;
+		config = profile->default_config;
+	} else if (tomoyo_str_starts(&name, "CONFIG::")) {
+		config = 0;
+		for (i = 0; i < TOMOYO_MAX_MAC_INDEX
+			     + TOMOYO_MAX_MAC_CATEGORY_INDEX; i++) {
+			int len = 0;
+			if (i < TOMOYO_MAX_MAC_INDEX) {
+				const u8 c = tomoyo_index2category[i];
+				const char *category =
+					tomoyo_category_keywords[c];
+				len = strlen(category);
+				if (strncmp(name, category, len) ||
+				    name[len++] != ':' || name[len++] != ':')
+					continue;
+			}
+			if (strcmp(name + len, tomoyo_mac_keywords[i]))
+				continue;
+			config = profile->config[i];
+			break;
+		}
+		if (i == TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX)
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+	if (strstr(value, "use_default")) {
+		config = TOMOYO_CONFIG_USE_DEFAULT;
+	} else {
+		u8 mode;
+		for (mode = 0; mode < 4; mode++)
+			if (strstr(value, tomoyo_mode[mode]))
+				/*
+				 * Update lower 3 bits in order to distinguish
+				 * 'config' from 'TOMOYO_CONFIG_USE_DEFAULT'.
+				 */
+				config = (config & ~7) | mode;
+		if (config != TOMOYO_CONFIG_USE_DEFAULT) {
+			switch (tomoyo_find_yesno(value, "grant_log")) {
+			case 1:
+				config |= TOMOYO_CONFIG_WANT_GRANT_LOG;
+				break;
+			case 0:
+				config &= ~TOMOYO_CONFIG_WANT_GRANT_LOG;
+				break;
+			}
+			switch (tomoyo_find_yesno(value, "reject_log")) {
+			case 1:
+				config |= TOMOYO_CONFIG_WANT_REJECT_LOG;
+				break;
+			case 0:
+				config &= ~TOMOYO_CONFIG_WANT_REJECT_LOG;
+				break;
+			}
+		}
+	}
+	if (i < TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX)
+		profile->config[i] = config;
+	else if (config != TOMOYO_CONFIG_USE_DEFAULT)
+		profile->default_config = config;
+	return 0;
+}
+
+/**
+ * tomoyo_write_profile - Write profile table.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
+{
+	char *data = head->write_buf;
+	unsigned int i;
+	char *cp;
+	struct tomoyo_profile *profile;
+	if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version)
+	    == 1)
+		return 0;
+	i = simple_strtoul(data, &cp, 10);
+	if (*cp != '-')
+		return -EINVAL;
+	data = cp + 1;
+	profile = tomoyo_assign_profile(head->w.ns, i);
+	if (!profile)
+		return -EINVAL;
+	cp = strchr(data, '=');
+	if (!cp)
+		return -EINVAL;
+	*cp++ = '\0';
+	if (!strcmp(data, "COMMENT")) {
+		static DEFINE_SPINLOCK(lock);
+		const struct tomoyo_path_info *new_comment
+			= tomoyo_get_name(cp);
+		const struct tomoyo_path_info *old_comment;
+		if (!new_comment)
+			return -ENOMEM;
+		spin_lock(&lock);
+		old_comment = profile->comment;
+		profile->comment = new_comment;
+		spin_unlock(&lock);
+		tomoyo_put_name(old_comment);
+		return 0;
+	}
+	if (!strcmp(data, "PREFERENCE")) {
+		for (i = 0; i < TOMOYO_MAX_PREF; i++)
+			tomoyo_set_uint(&profile->pref[i], cp,
+					tomoyo_pref_keywords[i]);
+		return 0;
+	}
+	return tomoyo_set_mode(data, cp, profile);
+}
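+
+/*
+ * Illustrative lines accepted by tomoyo_write_profile(), an editor's
+ * sketch (the values are hypothetical):
+ *
+ *	PROFILE_VERSION=20110903
+ *	0-COMMENT=-----Disabled Mode-----
+ *	0-PREFERENCE={ max_audit_log=1024 max_learning_entry=2048 }
+ *	0-CONFIG={ mode=disabled grant_log=no reject_log=yes }
+ *	0-CONFIG::file::execute={ mode=enforcing }
+ */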
+
+/**
+ * tomoyo_print_config - Print mode for specified functionality.
+ *
+ * @head:   Pointer to "struct tomoyo_io_buffer".
+ * @config: Mode for that functionality.
+ *
+ * Returns nothing.
+ *
+ * Caller prints functionality's name.
+ */
+static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config)
+{
+	tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n",
+			 tomoyo_mode[config & 3],
+			 tomoyo_yesno(config & TOMOYO_CONFIG_WANT_GRANT_LOG),
+			 tomoyo_yesno(config & TOMOYO_CONFIG_WANT_REJECT_LOG));
+}
+
+/**
+ * tomoyo_read_profile - Read profile table.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_read_profile(struct tomoyo_io_buffer *head)
+{
+	u8 index;
+	struct tomoyo_policy_namespace *ns =
+		container_of(head->r.ns, typeof(*ns), namespace_list);
+	const struct tomoyo_profile *profile;
+	if (head->r.eof)
+		return;
+ next:
+	index = head->r.index;
+	profile = ns->profile_ptr[index];
+	switch (head->r.step) {
+	case 0:
+		tomoyo_print_namespace(head);
+		tomoyo_io_printf(head, "PROFILE_VERSION=%u\n",
+				 ns->profile_version);
+		head->r.step++;
+		break;
+	case 1:
+		for ( ; head->r.index < TOMOYO_MAX_PROFILES;
+		      head->r.index++)
+			if (ns->profile_ptr[head->r.index])
+				break;
+		if (head->r.index == TOMOYO_MAX_PROFILES) {
+			head->r.eof = true;
+			return;
+		}
+		head->r.step++;
+		break;
+	case 2:
+		{
+			u8 i;
+			const struct tomoyo_path_info *comment =
+				profile->comment;
+			tomoyo_print_namespace(head);
+			tomoyo_io_printf(head, "%u-COMMENT=", index);
+			tomoyo_set_string(head, comment ? comment->name : "");
+			tomoyo_set_lf(head);
+			tomoyo_print_namespace(head);
+			tomoyo_io_printf(head, "%u-PREFERENCE={ ", index);
+			for (i = 0; i < TOMOYO_MAX_PREF; i++)
+				tomoyo_io_printf(head, "%s=%u ",
+						 tomoyo_pref_keywords[i],
+						 profile->pref[i]);
+			tomoyo_set_string(head, "}\n");
+			head->r.step++;
+		}
+		break;
+	case 3:
+		{
+			tomoyo_print_namespace(head);
+			tomoyo_io_printf(head, "%u-%s", index, "CONFIG");
+			tomoyo_print_config(head, profile->default_config);
+			head->r.bit = 0;
+			head->r.step++;
+		}
+		break;
+	case 4:
+		for ( ; head->r.bit < TOMOYO_MAX_MAC_INDEX
+			      + TOMOYO_MAX_MAC_CATEGORY_INDEX; head->r.bit++) {
+			const u8 i = head->r.bit;
+			const u8 config = profile->config[i];
+			if (config == TOMOYO_CONFIG_USE_DEFAULT)
+				continue;
+			tomoyo_print_namespace(head);
+			if (i < TOMOYO_MAX_MAC_INDEX)
+				tomoyo_io_printf(head, "%u-CONFIG::%s::%s",
+						 index,
+						 tomoyo_category_keywords
+						 [tomoyo_index2category[i]],
+						 tomoyo_mac_keywords[i]);
+			else
+				tomoyo_io_printf(head, "%u-CONFIG::%s", index,
+						 tomoyo_mac_keywords[i]);
+			tomoyo_print_config(head, config);
+			head->r.bit++;
+			break;
+		}
+		if (head->r.bit == TOMOYO_MAX_MAC_INDEX
+		    + TOMOYO_MAX_MAC_CATEGORY_INDEX) {
+			head->r.index++;
+			head->r.step = 1;
+		}
+		break;
+	}
+	if (tomoyo_flush(head))
+		goto next;
+}
+
+/**
+ * tomoyo_same_manager - Check for duplicated "struct tomoyo_manager" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_manager(const struct tomoyo_acl_head *a,
+				const struct tomoyo_acl_head *b)
+{
+	return container_of(a, struct tomoyo_manager, head)->manager ==
+		container_of(b, struct tomoyo_manager, head)->manager;
+}
+
+/**
+ * tomoyo_update_manager_entry - Add a manager entry.
+ *
+ * @manager:   The path to manager or the domainname.
+ * @is_delete: True if it is a delete request.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_manager_entry(const char *manager,
+				       const bool is_delete)
+{
+	struct tomoyo_manager e = { };
+	struct tomoyo_acl_param param = {
+		/* .ns = &tomoyo_kernel_namespace, */
+		.is_delete = is_delete,
+		.list = &tomoyo_kernel_namespace.
+		policy_list[TOMOYO_ID_MANAGER],
+	};
+	int error = is_delete ? -ENOENT : -ENOMEM;
+	if (!tomoyo_correct_domain(manager) &&
+	    !tomoyo_correct_word(manager))
+		return -EINVAL;
+	e.manager = tomoyo_get_name(manager);
+	if (e.manager) {
+		error = tomoyo_update_policy(&e.head, sizeof(e), &param,
+					     tomoyo_same_manager);
+		tomoyo_put_name(e.manager);
+	}
+	return error;
+}
+
+/**
+ * tomoyo_write_manager - Write manager policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_manager(struct tomoyo_io_buffer *head)
+{
+	char *data = head->write_buf;
+
+	if (!strcmp(data, "manage_by_non_root")) {
+		tomoyo_manage_by_non_root = !head->w.is_delete;
+		return 0;
+	}
+	return tomoyo_update_manager_entry(data, head->w.is_delete);
+}
+
+/**
+ * tomoyo_read_manager - Read manager policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static void tomoyo_read_manager(struct tomoyo_io_buffer *head)
+{
+	if (head->r.eof)
+		return;
+	list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace.
+			     policy_list[TOMOYO_ID_MANAGER]) {
+		struct tomoyo_manager *ptr =
+			list_entry(head->r.acl, typeof(*ptr), head.list);
+		if (ptr->head.is_deleted)
+			continue;
+		if (!tomoyo_flush(head))
+			return;
+		tomoyo_set_string(head, ptr->manager->name);
+		tomoyo_set_lf(head);
+	}
+	head->r.eof = true;
+}
+
+/**
+ * tomoyo_manager - Check whether the current process is a policy manager.
+ *
+ * Returns true if the current process is permitted to modify policy
+ * via /sys/kernel/security/tomoyo/ interface.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_manager(void)
+{
+	struct tomoyo_manager *ptr;
+	const char *exe;
+	const struct task_struct *task = current;
+	const struct tomoyo_path_info *domainname = tomoyo_domain()->domainname;
+	bool found = false;
+
+	if (!tomoyo_policy_loaded)
+		return true;
+	if (!tomoyo_manage_by_non_root &&
+	    (!uid_eq(task->cred->uid,  GLOBAL_ROOT_UID) ||
+	     !uid_eq(task->cred->euid, GLOBAL_ROOT_UID)))
+		return false;
+	exe = tomoyo_get_exe();
+	if (!exe)
+		return false;
+	list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.
+				policy_list[TOMOYO_ID_MANAGER], head.list) {
+		if (!ptr->head.is_deleted &&
+		    (!tomoyo_pathcmp(domainname, ptr->manager) ||
+		     !strcmp(exe, ptr->manager->name))) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) { /* Reduce error messages. */
+		static pid_t last_pid;
+		const pid_t pid = current->pid;
+		if (last_pid != pid) {
+			printk(KERN_WARNING "%s ( %s ) is not permitted to "
+			       "update policies.\n", domainname->name, exe);
+			last_pid = pid;
+		}
+	}
+	kfree(exe);
+	return found;
+}
+
+static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
+(unsigned int serial);
+
+/**
+ * tomoyo_select_domain - Parse select command.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @data: String to parse.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
+				 const char *data)
+{
+	unsigned int pid;
+	struct tomoyo_domain_info *domain = NULL;
+	bool global_pid = false;
+	if (strncmp(data, "select ", 7))
+		return false;
+	data += 7;
+	if (sscanf(data, "pid=%u", &pid) == 1 ||
+	    (global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) {
+		struct task_struct *p;
+		rcu_read_lock();
+		if (global_pid)
+			p = find_task_by_pid_ns(pid, &init_pid_ns);
+		else
+			p = find_task_by_vpid(pid);
+		if (p)
+			domain = tomoyo_real_domain(p);
+		rcu_read_unlock();
+	} else if (!strncmp(data, "domain=", 7)) {
+		if (tomoyo_domain_def(data + 7))
+			domain = tomoyo_find_domain(data + 7);
+	} else if (sscanf(data, "Q=%u", &pid) == 1) {
+		domain = tomoyo_find_domain_by_qid(pid);
+	} else
+		return false;
+	head->w.domain = domain;
+	/* Accessing read_buf is safe because head->io_sem is held. */
+	if (!head->read_buf)
+		return true; /* Do nothing if open(O_WRONLY). */
+	memset(&head->r, 0, sizeof(head->r));
+	head->r.print_this_domain_only = true;
+	if (domain)
+		head->r.domain = &domain->list;
+	else
+		head->r.eof = 1;
+	tomoyo_io_printf(head, "# select %s\n", data);
+	if (domain && domain->is_deleted)
+		tomoyo_io_printf(head, "# This is a deleted domain.\n");
+	return true;
+}
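+
+/*
+ * Illustrative "select" lines accepted by tomoyo_select_domain() (the PID,
+ * domainname and query serial are hypothetical):
+ *
+ *   select pid=8740
+ *   select global-pid=8740
+ *   select domain=<kernel> /usr/sbin/sshd
+ *   select Q=10
+ */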
+
+/**
+ * tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
+			      const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);
+	const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
+	return p1->domainname == p2->domainname;
+}
+
+/**
+ * tomoyo_write_task - Update task related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_task(struct tomoyo_acl_param *param)
+{
+	int error = -EINVAL;
+	if (tomoyo_str_starts(&param->data, "manual_domain_transition ")) {
+		struct tomoyo_task_acl e = {
+			.head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
+			.domainname = tomoyo_get_domainname(param),
+		};
+		if (e.domainname)
+			error = tomoyo_update_domain(&e.head, sizeof(e), param,
+						     tomoyo_same_task_acl,
+						     NULL);
+		tomoyo_put_name(e.domainname);
+	}
+	return error;
+}
+
+/**
+ * tomoyo_delete_domain - Delete a domain.
+ *
+ * @domainname: The name of domain.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_delete_domain(char *domainname)
+{
+	struct tomoyo_domain_info *domain;
+	struct tomoyo_path_info name;
+
+	name.name = domainname;
+	tomoyo_fill_path_info(&name);
+	if (mutex_lock_interruptible(&tomoyo_policy_lock))
+		return -EINTR;
+	/* Is there an active domain? */
+	list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+		/* Never delete tomoyo_kernel_domain */
+		if (domain == &tomoyo_kernel_domain)
+			continue;
+		if (domain->is_deleted ||
+		    tomoyo_pathcmp(domain->domainname, &name))
+			continue;
+		domain->is_deleted = true;
+		break;
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+	return 0;
+}
+
+/**
+ * tomoyo_write_domain2 - Write domain policy.
+ *
+ * @ns:        Pointer to "struct tomoyo_policy_namespace".
+ * @list:      Pointer to "struct list_head".
+ * @data:      Policy to be interpreted.
+ * @is_delete: True if it is a delete request.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns,
+				struct list_head *list, char *data,
+				const bool is_delete)
+{
+	struct tomoyo_acl_param param = {
+		.ns = ns,
+		.list = list,
+		.data = data,
+		.is_delete = is_delete,
+	};
+	static const struct {
+		const char *keyword;
+		int (*write) (struct tomoyo_acl_param *);
+	} tomoyo_callback[5] = {
+		{ "file ", tomoyo_write_file },
+		{ "network inet ", tomoyo_write_inet_network },
+		{ "network unix ", tomoyo_write_unix_network },
+		{ "misc ", tomoyo_write_misc },
+		{ "task ", tomoyo_write_task },
+	};
+	u8 i;
+
+	for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) {
+		if (!tomoyo_str_starts(&param.data,
+				       tomoyo_callback[i].keyword))
+			continue;
+		return tomoyo_callback[i].write(&param);
+	}
+	return -EINVAL;
+}
+
+/* String table for domain flags. */
+const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS] = {
+	[TOMOYO_DIF_QUOTA_WARNED]      = "quota_exceeded\n",
+	[TOMOYO_DIF_TRANSITION_FAILED] = "transition_failed\n",
+};
+
+/**
+ * tomoyo_write_domain - Write domain policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_domain(struct tomoyo_io_buffer *head)
+{
+	char *data = head->write_buf;
+	struct tomoyo_policy_namespace *ns;
+	struct tomoyo_domain_info *domain = head->w.domain;
+	const bool is_delete = head->w.is_delete;
+	bool is_select = !is_delete && tomoyo_str_starts(&data, "select ");
+	unsigned int profile;
+	if (*data == '<') {
+		int ret = 0;
+		domain = NULL;
+		if (is_delete)
+			ret = tomoyo_delete_domain(data);
+		else if (is_select)
+			domain = tomoyo_find_domain(data);
+		else
+			domain = tomoyo_assign_domain(data, false);
+		head->w.domain = domain;
+		return ret;
+	}
+	if (!domain)
+		return -EINVAL;
+	ns = domain->ns;
+	if (sscanf(data, "use_profile %u", &profile) == 1
+	    && profile < TOMOYO_MAX_PROFILES) {
+		if (!tomoyo_policy_loaded || ns->profile_ptr[profile])
+			domain->profile = (u8) profile;
+		return 0;
+	}
+	if (sscanf(data, "use_group %u\n", &profile) == 1
+	    && profile < TOMOYO_MAX_ACL_GROUPS) {
+		if (!is_delete)
+			domain->group = (u8) profile;
+		return 0;
+	}
+	for (profile = 0; profile < TOMOYO_MAX_DOMAIN_INFO_FLAGS; profile++) {
+		const char *cp = tomoyo_dif[profile];
+		if (strncmp(data, cp, strlen(cp) - 1))
+			continue;
+		domain->flags[profile] = !is_delete;
+		return 0;
+	}
+	return tomoyo_write_domain2(ns, &domain->acl_info_list, data,
+				    is_delete);
+}
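+
+/*
+ * Illustrative domain policy block handled by tomoyo_write_domain() (the
+ * domainname, numbers and pathname are hypothetical):
+ *
+ *   <kernel> /usr/sbin/sshd
+ *   use_profile 3
+ *   use_group 0
+ *   file read /etc/passwd
+ *
+ * The first line selects (or creates) the domain; the remaining lines are
+ * applied to it, with ACL keywords dispatched via tomoyo_write_domain2().
+ */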
+
+/**
+ * tomoyo_print_condition - Print condition part.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @cond: Pointer to "struct tomoyo_condition".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
+				   const struct tomoyo_condition *cond)
+{
+	switch (head->r.cond_step) {
+	case 0:
+		head->r.cond_index = 0;
+		head->r.cond_step++;
+		if (cond->transit) {
+			tomoyo_set_space(head);
+			tomoyo_set_string(head, cond->transit->name);
+		}
+		/* fall through */
+	case 1:
+		{
+			const u16 condc = cond->condc;
+			const struct tomoyo_condition_element *condp =
+				(typeof(condp)) (cond + 1);
+			const struct tomoyo_number_union *numbers_p =
+				(typeof(numbers_p)) (condp + condc);
+			const struct tomoyo_name_union *names_p =
+				(typeof(names_p))
+				(numbers_p + cond->numbers_count);
+			const struct tomoyo_argv *argv =
+				(typeof(argv)) (names_p + cond->names_count);
+			const struct tomoyo_envp *envp =
+				(typeof(envp)) (argv + cond->argc);
+			u16 skip;
+			for (skip = 0; skip < head->r.cond_index; skip++) {
+				const u8 left = condp->left;
+				const u8 right = condp->right;
+				condp++;
+				switch (left) {
+				case TOMOYO_ARGV_ENTRY:
+					argv++;
+					continue;
+				case TOMOYO_ENVP_ENTRY:
+					envp++;
+					continue;
+				case TOMOYO_NUMBER_UNION:
+					numbers_p++;
+					break;
+				}
+				switch (right) {
+				case TOMOYO_NAME_UNION:
+					names_p++;
+					break;
+				case TOMOYO_NUMBER_UNION:
+					numbers_p++;
+					break;
+				}
+			}
+			while (head->r.cond_index < condc) {
+				const u8 match = condp->equals;
+				const u8 left = condp->left;
+				const u8 right = condp->right;
+				if (!tomoyo_flush(head))
+					return false;
+				condp++;
+				head->r.cond_index++;
+				tomoyo_set_space(head);
+				switch (left) {
+				case TOMOYO_ARGV_ENTRY:
+					tomoyo_io_printf(head,
+							 "exec.argv[%lu]%s=\"",
+							 argv->index, argv->
+							 is_not ? "!" : "");
+					tomoyo_set_string(head,
+							  argv->value->name);
+					tomoyo_set_string(head, "\"");
+					argv++;
+					continue;
+				case TOMOYO_ENVP_ENTRY:
+					tomoyo_set_string(head,
+							  "exec.envp[\"");
+					tomoyo_set_string(head,
+							  envp->name->name);
+					tomoyo_io_printf(head, "\"]%s=", envp->
+							 is_not ? "!" : "");
+					if (envp->value) {
+						tomoyo_set_string(head, "\"");
+						tomoyo_set_string(head, envp->
+								  value->name);
+						tomoyo_set_string(head, "\"");
+					} else {
+						tomoyo_set_string(head,
+								  "NULL");
+					}
+					envp++;
+					continue;
+				case TOMOYO_NUMBER_UNION:
+					tomoyo_print_number_union_nospace
+						(head, numbers_p++);
+					break;
+				default:
+					tomoyo_set_string(head,
+					       tomoyo_condition_keyword[left]);
+					break;
+				}
+				tomoyo_set_string(head, match ? "=" : "!=");
+				switch (right) {
+				case TOMOYO_NAME_UNION:
+					tomoyo_print_name_union_quoted
+						(head, names_p++);
+					break;
+				case TOMOYO_NUMBER_UNION:
+					tomoyo_print_number_union_nospace
+						(head, numbers_p++);
+					break;
+				default:
+					tomoyo_set_string(head,
+					  tomoyo_condition_keyword[right]);
+					break;
+				}
+			}
+		}
+		head->r.cond_step++;
+		/* fall through */
+	case 2:
+		if (!tomoyo_flush(head))
+			break;
+		head->r.cond_step++;
+		/* fall through */
+	case 3:
+		if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+			tomoyo_io_printf(head, " grant_log=%s",
+					 tomoyo_yesno(cond->grant_log ==
+						      TOMOYO_GRANTLOG_YES));
+		tomoyo_set_lf(head);
+		return true;
+	}
+	return false;
+}
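+
+/*
+ * Illustrative output of tomoyo_print_condition() (keyword names and values
+ * are hypothetical): appended to an ACL entry, the condition part may look
+ * like
+ *
+ *   file read /etc/shadow task.uid=0 exec.argv[0]="/bin/cat" grant_log=yes
+ */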
+
+/**
+ * tomoyo_set_group - Print "acl_group " header keyword and category name.
+ *
+ * @head:     Pointer to "struct tomoyo_io_buffer".
+ * @category: Category name.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_set_group(struct tomoyo_io_buffer *head,
+			     const char *category)
+{
+	if (head->type == TOMOYO_EXCEPTIONPOLICY) {
+		tomoyo_print_namespace(head);
+		tomoyo_io_printf(head, "acl_group %u ",
+				 head->r.acl_group_index);
+	}
+	tomoyo_set_string(head, category);
+}
+
+/**
+ * tomoyo_print_entry - Print an ACL entry.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @acl:  Pointer to an ACL entry.
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
+			       struct tomoyo_acl_info *acl)
+{
+	const u8 acl_type = acl->type;
+	bool first = true;
+	u8 bit;
+
+	if (head->r.print_cond_part)
+		goto print_cond_part;
+	if (acl->is_deleted)
+		return true;
+	if (!tomoyo_flush(head))
+		return false;
+	else if (acl_type == TOMOYO_TYPE_PATH_ACL) {
+		struct tomoyo_path_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+		const u16 perm = ptr->perm;
+		for (bit = 0; bit < TOMOYO_MAX_PATH_OPERATION; bit++) {
+			if (!(perm & (1 << bit)))
+				continue;
+			if (head->r.print_transition_related_only &&
+			    bit != TOMOYO_TYPE_EXECUTE)
+				continue;
+			if (first) {
+				tomoyo_set_group(head, "file ");
+				first = false;
+			} else {
+				tomoyo_set_slash(head);
+			}
+			tomoyo_set_string(head, tomoyo_path_keyword[bit]);
+		}
+		if (first)
+			return true;
+		tomoyo_print_name_union(head, &ptr->name);
+	} else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) {
+		struct tomoyo_task_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+		tomoyo_set_group(head, "task ");
+		tomoyo_set_string(head, "manual_domain_transition ");
+		tomoyo_set_string(head, ptr->domainname->name);
+	} else if (head->r.print_transition_related_only) {
+		return true;
+	} else if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
+		struct tomoyo_path2_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+		const u8 perm = ptr->perm;
+		for (bit = 0; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) {
+			if (!(perm & (1 << bit)))
+				continue;
+			if (first) {
+				tomoyo_set_group(head, "file ");
+				first = false;
+			} else {
+				tomoyo_set_slash(head);
+			}
+			tomoyo_set_string(head, tomoyo_mac_keywords
+					  [tomoyo_pp2mac[bit]]);
+		}
+		if (first)
+			return true;
+		tomoyo_print_name_union(head, &ptr->name1);
+		tomoyo_print_name_union(head, &ptr->name2);
+	} else if (acl_type == TOMOYO_TYPE_PATH_NUMBER_ACL) {
+		struct tomoyo_path_number_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+		const u8 perm = ptr->perm;
+		for (bit = 0; bit < TOMOYO_MAX_PATH_NUMBER_OPERATION; bit++) {
+			if (!(perm & (1 << bit)))
+				continue;
+			if (first) {
+				tomoyo_set_group(head, "file ");
+				first = false;
+			} else {
+				tomoyo_set_slash(head);
+			}
+			tomoyo_set_string(head, tomoyo_mac_keywords
+					  [tomoyo_pn2mac[bit]]);
+		}
+		if (first)
+			return true;
+		tomoyo_print_name_union(head, &ptr->name);
+		tomoyo_print_number_union(head, &ptr->number);
+	} else if (acl_type == TOMOYO_TYPE_MKDEV_ACL) {
+		struct tomoyo_mkdev_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+		const u8 perm = ptr->perm;
+		for (bit = 0; bit < TOMOYO_MAX_MKDEV_OPERATION; bit++) {
+			if (!(perm & (1 << bit)))
+				continue;
+			if (first) {
+				tomoyo_set_group(head, "file ");
+				first = false;
+			} else {
+				tomoyo_set_slash(head);
+			}
+			tomoyo_set_string(head, tomoyo_mac_keywords
+					  [tomoyo_pnnn2mac[bit]]);
+		}
+		if (first)
+			return true;
+		tomoyo_print_name_union(head, &ptr->name);
+		tomoyo_print_number_union(head, &ptr->mode);
+		tomoyo_print_number_union(head, &ptr->major);
+		tomoyo_print_number_union(head, &ptr->minor);
+	} else if (acl_type == TOMOYO_TYPE_INET_ACL) {
+		struct tomoyo_inet_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+		const u8 perm = ptr->perm;
+
+		for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+			if (!(perm & (1 << bit)))
+				continue;
+			if (first) {
+				tomoyo_set_group(head, "network inet ");
+				tomoyo_set_string(head, tomoyo_proto_keyword
+						  [ptr->protocol]);
+				tomoyo_set_space(head);
+				first = false;
+			} else {
+				tomoyo_set_slash(head);
+			}
+			tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+		}
+		if (first)
+			return true;
+		tomoyo_set_space(head);
+		if (ptr->address.group) {
+			tomoyo_set_string(head, "@");
+			tomoyo_set_string(head, ptr->address.group->group_name
+					  ->name);
+		} else {
+			char buf[128];
+			tomoyo_print_ip(buf, sizeof(buf), &ptr->address);
+			tomoyo_io_printf(head, "%s", buf);
+		}
+		tomoyo_print_number_union(head, &ptr->port);
+	} else if (acl_type == TOMOYO_TYPE_UNIX_ACL) {
+		struct tomoyo_unix_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+		const u8 perm = ptr->perm;
+
+		for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+			if (!(perm & (1 << bit)))
+				continue;
+			if (first) {
+				tomoyo_set_group(head, "network unix ");
+				tomoyo_set_string(head, tomoyo_proto_keyword
+						  [ptr->protocol]);
+				tomoyo_set_space(head);
+				first = false;
+			} else {
+				tomoyo_set_slash(head);
+			}
+			tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+		}
+		if (first)
+			return true;
+		tomoyo_print_name_union(head, &ptr->name);
+	} else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) {
+		struct tomoyo_mount_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+		tomoyo_set_group(head, "file mount");
+		tomoyo_print_name_union(head, &ptr->dev_name);
+		tomoyo_print_name_union(head, &ptr->dir_name);
+		tomoyo_print_name_union(head, &ptr->fs_type);
+		tomoyo_print_number_union(head, &ptr->flags);
+	} else if (acl_type == TOMOYO_TYPE_ENV_ACL) {
+		struct tomoyo_env_acl *ptr =
+			container_of(acl, typeof(*ptr), head);
+
+		tomoyo_set_group(head, "misc env ");
+		tomoyo_set_string(head, ptr->env->name);
+	}
+	if (acl->cond) {
+		head->r.print_cond_part = true;
+		head->r.cond_step = 0;
+		if (!tomoyo_flush(head))
+			return false;
+print_cond_part:
+		if (!tomoyo_print_condition(head, acl->cond))
+			return false;
+		head->r.print_cond_part = false;
+	} else {
+		tomoyo_set_lf(head);
+	}
+	return true;
+}
+
+/**
+ * tomoyo_read_domain2 - Read domain policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @list: Pointer to "struct list_head".
+ *
+ * Caller holds tomoyo_read_lock().
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head,
+				struct list_head *list)
+{
+	list_for_each_cookie(head->r.acl, list) {
+		struct tomoyo_acl_info *ptr =
+			list_entry(head->r.acl, typeof(*ptr), list);
+		if (!tomoyo_print_entry(head, ptr))
+			return false;
+	}
+	head->r.acl = NULL;
+	return true;
+}
+
+/**
+ * tomoyo_read_domain - Read domain policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static void tomoyo_read_domain(struct tomoyo_io_buffer *head)
+{
+	if (head->r.eof)
+		return;
+	list_for_each_cookie(head->r.domain, &tomoyo_domain_list) {
+		struct tomoyo_domain_info *domain =
+			list_entry(head->r.domain, typeof(*domain), list);
+		switch (head->r.step) {
+			u8 i;
+		case 0:
+			if (domain->is_deleted &&
+			    !head->r.print_this_domain_only)
+				continue;
+			/* Print domainname and flags. */
+			tomoyo_set_string(head, domain->domainname->name);
+			tomoyo_set_lf(head);
+			tomoyo_io_printf(head, "use_profile %u\n",
+					 domain->profile);
+			tomoyo_io_printf(head, "use_group %u\n",
+					 domain->group);
+			for (i = 0; i < TOMOYO_MAX_DOMAIN_INFO_FLAGS; i++)
+				if (domain->flags[i])
+					tomoyo_set_string(head, tomoyo_dif[i]);
+			head->r.step++;
+			tomoyo_set_lf(head);
+			/* fall through */
+		case 1:
+			if (!tomoyo_read_domain2(head, &domain->acl_info_list))
+				return;
+			head->r.step++;
+			if (!tomoyo_set_lf(head))
+				return;
+			/* fall through */
+		case 2:
+			head->r.step = 0;
+			if (head->r.print_this_domain_only)
+				goto done;
+		}
+	}
+ done:
+	head->r.eof = true;
+}
+
+/**
+ * tomoyo_write_pid - Specify PID to obtain domainname.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0.
+ */
+static int tomoyo_write_pid(struct tomoyo_io_buffer *head)
+{
+	head->r.eof = false;
+	return 0;
+}
+
+/**
+ * tomoyo_read_pid - Get domainname of the specified PID.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns the domainname which the specified PID is in on success,
+ * an empty string otherwise.
+ * The PID is specified via tomoyo_write_pid() so that the user can obtain
+ * it using the read()/write() interface rather than the sysctl() interface.
+ */
+static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
+{
+	char *buf = head->write_buf;
+	bool global_pid = false;
+	unsigned int pid;
+	struct task_struct *p;
+	struct tomoyo_domain_info *domain = NULL;
+
+	/* Accessing write_buf is safe because head->io_sem is held. */
+	if (!buf) {
+		head->r.eof = true;
+		return; /* Do nothing if open(O_RDONLY). */
+	}
+	if (head->r.w_pos || head->r.eof)
+		return;
+	head->r.eof = true;
+	if (tomoyo_str_starts(&buf, "global-pid "))
+		global_pid = true;
+	pid = (unsigned int) simple_strtoul(buf, NULL, 10);
+	rcu_read_lock();
+	if (global_pid)
+		p = find_task_by_pid_ns(pid, &init_pid_ns);
+	else
+		p = find_task_by_vpid(pid);
+	if (p)
+		domain = tomoyo_real_domain(p);
+	rcu_read_unlock();
+	if (!domain)
+		return;
+	tomoyo_io_printf(head, "%u %u ", pid, domain->profile);
+	tomoyo_set_string(head, domain->domainname->name);
+}
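+
+/*
+ * Illustrative exchange with /sys/kernel/security/tomoyo/.process_status
+ * (the PID, profile number and domainname are hypothetical): writing
+ * "global-pid 1" and reading back yields
+ *
+ *   1 0 <kernel>
+ *
+ * i.e. "<pid> <profile> <domainname>" as printed above.
+ */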
+
+/* String table for domain transition control keywords. */
+static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = {
+	[TOMOYO_TRANSITION_CONTROL_NO_RESET]      = "no_reset_domain ",
+	[TOMOYO_TRANSITION_CONTROL_RESET]         = "reset_domain ",
+	[TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] = "no_initialize_domain ",
+	[TOMOYO_TRANSITION_CONTROL_INITIALIZE]    = "initialize_domain ",
+	[TOMOYO_TRANSITION_CONTROL_NO_KEEP]       = "no_keep_domain ",
+	[TOMOYO_TRANSITION_CONTROL_KEEP]          = "keep_domain ",
+};
+
+/* String table for grouping keywords. */
+static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = {
+	[TOMOYO_PATH_GROUP]    = "path_group ",
+	[TOMOYO_NUMBER_GROUP]  = "number_group ",
+	[TOMOYO_ADDRESS_GROUP] = "address_group ",
+};
+
+/**
+ * tomoyo_write_exception - Write exception policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
+{
+	const bool is_delete = head->w.is_delete;
+	struct tomoyo_acl_param param = {
+		.ns = head->w.ns,
+		.is_delete = is_delete,
+		.data = head->write_buf,
+	};
+	u8 i;
+	if (tomoyo_str_starts(&param.data, "aggregator "))
+		return tomoyo_write_aggregator(&param);
+	for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++)
+		if (tomoyo_str_starts(&param.data, tomoyo_transition_type[i]))
+			return tomoyo_write_transition_control(&param, i);
+	for (i = 0; i < TOMOYO_MAX_GROUP; i++)
+		if (tomoyo_str_starts(&param.data, tomoyo_group_name[i]))
+			return tomoyo_write_group(&param, i);
+	if (tomoyo_str_starts(&param.data, "acl_group ")) {
+		unsigned int group;
+		char *data;
+		group = simple_strtoul(param.data, &data, 10);
+		if (group < TOMOYO_MAX_ACL_GROUPS && *data++ == ' ')
+			return tomoyo_write_domain2
+				(head->w.ns, &head->w.ns->acl_group[group],
+				 data, is_delete);
+	}
+	return -EINVAL;
+}
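+
+/*
+ * Illustrative exception policy lines dispatched by tomoyo_write_exception()
+ * (pathnames and the group name are hypothetical; the exact syntax of each
+ * entry is parsed by the corresponding tomoyo_write_*() helper):
+ *
+ *   aggregator /usr/sbin/sendmail.postfix /usr/sbin/sendmail
+ *   initialize_domain /usr/sbin/sshd from any
+ *   path_group WEB-CONTENTS /var/www/\*
+ *   acl_group 0 file read /etc/ld.so.cache
+ */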
+
+/**
+ * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @idx:  Index number.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
+{
+	struct tomoyo_policy_namespace *ns =
+		container_of(head->r.ns, typeof(*ns), namespace_list);
+	struct list_head *list = &ns->group_list[idx];
+	list_for_each_cookie(head->r.group, list) {
+		struct tomoyo_group *group =
+			list_entry(head->r.group, typeof(*group), head.list);
+		list_for_each_cookie(head->r.acl, &group->member_list) {
+			struct tomoyo_acl_head *ptr =
+				list_entry(head->r.acl, typeof(*ptr), list);
+			if (ptr->is_deleted)
+				continue;
+			if (!tomoyo_flush(head))
+				return false;
+			tomoyo_print_namespace(head);
+			tomoyo_set_string(head, tomoyo_group_name[idx]);
+			tomoyo_set_string(head, group->group_name->name);
+			if (idx == TOMOYO_PATH_GROUP) {
+				tomoyo_set_space(head);
+				tomoyo_set_string(head, container_of
+					       (ptr, struct tomoyo_path_group,
+						head)->member_name->name);
+			} else if (idx == TOMOYO_NUMBER_GROUP) {
+				tomoyo_print_number_union(head, &container_of
+							  (ptr,
+						   struct tomoyo_number_group,
+							   head)->number);
+			} else if (idx == TOMOYO_ADDRESS_GROUP) {
+				char buffer[128];
+
+				struct tomoyo_address_group *member =
+					container_of(ptr, typeof(*member),
+						     head);
+				tomoyo_print_ip(buffer, sizeof(buffer),
+						&member->address);
+				tomoyo_io_printf(head, " %s", buffer);
+			}
+			tomoyo_set_lf(head);
+		}
+		head->r.acl = NULL;
+	}
+	head->r.group = NULL;
+	return true;
+}
+
+/**
+ * tomoyo_read_policy - Read "struct tomoyo_..._entry" list.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @idx:  Index number.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx)
+{
+	struct tomoyo_policy_namespace *ns =
+		container_of(head->r.ns, typeof(*ns), namespace_list);
+	struct list_head *list = &ns->policy_list[idx];
+	list_for_each_cookie(head->r.acl, list) {
+		struct tomoyo_acl_head *acl =
+			container_of(head->r.acl, typeof(*acl), list);
+		if (acl->is_deleted)
+			continue;
+		if (!tomoyo_flush(head))
+			return false;
+		switch (idx) {
+		case TOMOYO_ID_TRANSITION_CONTROL:
+			{
+				struct tomoyo_transition_control *ptr =
+					container_of(acl, typeof(*ptr), head);
+				tomoyo_print_namespace(head);
+				tomoyo_set_string(head, tomoyo_transition_type
+						  [ptr->type]);
+				tomoyo_set_string(head, ptr->program ?
+						  ptr->program->name : "any");
+				tomoyo_set_string(head, " from ");
+				tomoyo_set_string(head, ptr->domainname ?
+						  ptr->domainname->name :
+						  "any");
+			}
+			break;
+		case TOMOYO_ID_AGGREGATOR:
+			{
+				struct tomoyo_aggregator *ptr =
+					container_of(acl, typeof(*ptr), head);
+				tomoyo_print_namespace(head);
+				tomoyo_set_string(head, "aggregator ");
+				tomoyo_set_string(head,
+						  ptr->original_name->name);
+				tomoyo_set_space(head);
+				tomoyo_set_string(head,
+					       ptr->aggregated_name->name);
+			}
+			break;
+		default:
+			continue;
+		}
+		tomoyo_set_lf(head);
+	}
+	head->r.acl = NULL;
+	return true;
+}
+
+/**
+ * tomoyo_read_exception - Read exception policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static void tomoyo_read_exception(struct tomoyo_io_buffer *head)
+{
+	struct tomoyo_policy_namespace *ns =
+		container_of(head->r.ns, typeof(*ns), namespace_list);
+	if (head->r.eof)
+		return;
+	while (head->r.step < TOMOYO_MAX_POLICY &&
+	       tomoyo_read_policy(head, head->r.step))
+		head->r.step++;
+	if (head->r.step < TOMOYO_MAX_POLICY)
+		return;
+	while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP &&
+	       tomoyo_read_group(head, head->r.step - TOMOYO_MAX_POLICY))
+		head->r.step++;
+	if (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP)
+		return;
+	while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP
+	       + TOMOYO_MAX_ACL_GROUPS) {
+		head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY
+			- TOMOYO_MAX_GROUP;
+		if (!tomoyo_read_domain2(head, &ns->acl_group
+					 [head->r.acl_group_index]))
+			return;
+		head->r.step++;
+	}
+	head->r.eof = true;
+}
+
+/* Wait queue for kernel -> userspace notification. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_query_wait);
+/* Wait queue for userspace -> kernel notification. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_answer_wait);
+
+/* Structure for query. */
+struct tomoyo_query {
+	struct list_head list;
+	struct tomoyo_domain_info *domain;
+	char *query;
+	size_t query_len;
+	unsigned int serial;
+	u8 timer;
+	u8 answer;
+	u8 retry;
+};
+
+/* The list for "struct tomoyo_query". */
+static LIST_HEAD(tomoyo_query_list);
+
+/* Lock for manipulating tomoyo_query_list. */
+static DEFINE_SPINLOCK(tomoyo_query_list_lock);
+
+/*
+ * Number of "struct file" objects referring to the
+ * /sys/kernel/security/tomoyo/query interface.
+ */
+static atomic_t tomoyo_query_observers = ATOMIC_INIT(0);
+
+/**
+ * tomoyo_truncate - Truncate a line.
+ *
+ * @str: String to truncate.
+ *
+ * Returns the length of the truncated @str, including the trailing '\0'.
+ */
+static int tomoyo_truncate(char *str)
+{
+	char *start = str;
+	while (*(unsigned char *) str > (unsigned char) ' ')
+		str++;
+	*str = '\0';
+	return strlen(start) + 1;
+}
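+
+/*
+ * Example: given "foo bar", tomoyo_truncate() cuts the string at the first
+ * byte <= ' ', leaving "foo" and returning 4 (strlen("foo") + 1).
+ */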
+
+/**
+ * tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode.
+ *
+ * @domain: Pointer to "struct tomoyo_domain_info".
+ * @header: Lines containing ACL.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header)
+{
+	char *buffer;
+	char *realpath = NULL;
+	char *argv0 = NULL;
+	char *symlink = NULL;
+	char *cp = strchr(header, '\n');
+	int len;
+	if (!cp)
+		return;
+	cp = strchr(cp + 1, '\n');
+	if (!cp)
+		return;
+	*cp++ = '\0';
+	len = strlen(cp) + 1;
+	/* strstr() will return NULL if ordering is wrong. */
+	if (*cp == 'f') {
+		argv0 = strstr(header, " argv[]={ \"");
+		if (argv0) {
+			argv0 += 10;
+			len += tomoyo_truncate(argv0) + 14;
+		}
+		realpath = strstr(header, " exec={ realpath=\"");
+		if (realpath) {
+			realpath += 8;
+			len += tomoyo_truncate(realpath) + 6;
+		}
+		symlink = strstr(header, " symlink.target=\"");
+		if (symlink)
+			len += tomoyo_truncate(symlink + 1) + 1;
+	}
+	buffer = kmalloc(len, GFP_NOFS);
+	if (!buffer)
+		return;
+	snprintf(buffer, len - 1, "%s", cp);
+	if (realpath)
+		tomoyo_addprintf(buffer, len, " exec.%s", realpath);
+	if (argv0)
+		tomoyo_addprintf(buffer, len, " exec.argv[0]=%s", argv0);
+	if (symlink)
+		tomoyo_addprintf(buffer, len, "%s", symlink);
+	tomoyo_normalize_line(buffer);
+	if (!tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer,
+				  false))
+		tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+	kfree(buffer);
+}
+
+/**
+ * tomoyo_supervisor - Ask for the supervisor's decision.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @fmt: The printf() style format string, followed by its parameters.
+ *
+ * Returns 0 if the supervisor decided to permit the access request which
+ * violated the policy in enforcing mode, TOMOYO_RETRY_REQUEST if the
+ * supervisor decided to retry the access request which violated the policy in
+ * enforcing mode, 0 if it is not in enforcing mode, -EPERM otherwise.
+ */
+int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
+{
+	va_list args;
+	int error;
+	int len;
+	static unsigned int tomoyo_serial;
+	struct tomoyo_query entry = { };
+	bool quota_exceeded = false;
+	va_start(args, fmt);
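+	/*
+	 * Compute the length of the formatted message; &len serves only as
+	 * a 1-byte scratch buffer so that vsnprintf() reports the size.
+	 */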
+	len = vsnprintf((char *) &len, 1, fmt, args) + 1;
+	va_end(args);
+	/* Write /sys/kernel/security/tomoyo/audit. */
+	va_start(args, fmt);
+	tomoyo_write_log2(r, len, fmt, args);
+	va_end(args);
+	/* Nothing more to do if granted. */
+	if (r->granted)
+		return 0;
+	if (r->mode)
+		tomoyo_update_stat(r->mode);
+	switch (r->mode) {
+	case TOMOYO_CONFIG_ENFORCING:
+		error = -EPERM;
+		if (atomic_read(&tomoyo_query_observers))
+			break;
+		goto out;
+	case TOMOYO_CONFIG_LEARNING:
+		error = 0;
+		/* Check max_learning_entry parameter. */
+		if (tomoyo_domain_quota_is_ok(r))
+			break;
+		/* fall through */
+	default:
+		return 0;
+	}
+	/* Get message. */
+	va_start(args, fmt);
+	entry.query = tomoyo_init_log(r, len, fmt, args);
+	va_end(args);
+	if (!entry.query)
+		goto out;
+	entry.query_len = strlen(entry.query) + 1;
+	if (!error) {
+		tomoyo_add_entry(r->domain, entry.query);
+		goto out;
+	}
+	len = tomoyo_round2(entry.query_len);
+	entry.domain = r->domain;
+	spin_lock(&tomoyo_query_list_lock);
+	if (tomoyo_memory_quota[TOMOYO_MEMORY_QUERY] &&
+	    tomoyo_memory_used[TOMOYO_MEMORY_QUERY] + len
+	    >= tomoyo_memory_quota[TOMOYO_MEMORY_QUERY]) {
+		quota_exceeded = true;
+	} else {
+		entry.serial = tomoyo_serial++;
+		entry.retry = r->retry;
+		tomoyo_memory_used[TOMOYO_MEMORY_QUERY] += len;
+		list_add_tail(&entry.list, &tomoyo_query_list);
+	}
+	spin_unlock(&tomoyo_query_list_lock);
+	if (quota_exceeded)
+		goto out;
+	/* Give 10 seconds for supervisor's opinion. */
+	while (entry.timer < 10) {
+		wake_up_all(&tomoyo_query_wait);
+		if (wait_event_interruptible_timeout
+		    (tomoyo_answer_wait, entry.answer ||
+		     !atomic_read(&tomoyo_query_observers), HZ))
+			break;
+		else
+			entry.timer++;
+	}
+	spin_lock(&tomoyo_query_list_lock);
+	list_del(&entry.list);
+	tomoyo_memory_used[TOMOYO_MEMORY_QUERY] -= len;
+	spin_unlock(&tomoyo_query_list_lock);
+	switch (entry.answer) {
+	case 3: /* Asked to retry by administrator. */
+		error = TOMOYO_RETRY_REQUEST;
+		r->retry++;
+		break;
+	case 1:
+		/* Granted by administrator. */
+		error = 0;
+		break;
+	default:
+		/* Timed out or rejected by administrator. */
+		break;
+	}
+out:
+	kfree(entry.query);
+	return error;
+}
+
+/**
+ * tomoyo_find_domain_by_qid - Get domain by query id.
+ *
+ * @serial: Query ID assigned by tomoyo_supervisor().
+ *
+ * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ */
+static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
+(unsigned int serial)
+{
+	struct tomoyo_query *ptr;
+	struct tomoyo_domain_info *domain = NULL;
+	spin_lock(&tomoyo_query_list_lock);
+	list_for_each_entry(ptr, &tomoyo_query_list, list) {
+		if (ptr->serial != serial)
+			continue;
+		domain = ptr->domain;
+		break;
+	}
+	spin_unlock(&tomoyo_query_list_lock);
+	return domain;
+}
+
+/**
+ * tomoyo_poll_query - poll() for /sys/kernel/security/tomoyo/query.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table".
+ *
+ * Returns POLLIN | POLLRDNORM when ready to read, 0 otherwise.
+ *
+ * Waits for access requests which violated policy in enforcing mode.
+ */
+static unsigned int tomoyo_poll_query(struct file *file, poll_table *wait)
+{
+	if (!list_empty(&tomoyo_query_list))
+		return POLLIN | POLLRDNORM;
+	poll_wait(file, &tomoyo_query_wait, wait);
+	if (!list_empty(&tomoyo_query_list))
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+/**
+ * tomoyo_read_query - Read access requests which violated policy in enforcing mode.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ */
+static void tomoyo_read_query(struct tomoyo_io_buffer *head)
+{
+	struct list_head *tmp;
+	unsigned int pos = 0;
+	size_t len = 0;
+	char *buf;
+	if (head->r.w_pos)
+		return;
+	if (head->read_buf) {
+		kfree(head->read_buf);
+		head->read_buf = NULL;
+	}
+	spin_lock(&tomoyo_query_list_lock);
+	list_for_each(tmp, &tomoyo_query_list) {
+		struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+		if (pos++ != head->r.query_index)
+			continue;
+		len = ptr->query_len;
+		break;
+	}
+	spin_unlock(&tomoyo_query_list_lock);
+	if (!len) {
+		head->r.query_index = 0;
+		return;
+	}
+	buf = kzalloc(len + 32, GFP_NOFS);
+	if (!buf)
+		return;
+	pos = 0;
+	spin_lock(&tomoyo_query_list_lock);
+	list_for_each(tmp, &tomoyo_query_list) {
+		struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+		if (pos++ != head->r.query_index)
+			continue;
+		/*
+		 * Some queries may be skipped because tomoyo_query_list
+		 * can change while the lock is dropped, but that is
+		 * harmless.
+		 */
+		if (len == ptr->query_len)
+			snprintf(buf, len + 31, "Q%u-%hu\n%s", ptr->serial,
+				 ptr->retry, ptr->query);
+		break;
+	}
+	spin_unlock(&tomoyo_query_list_lock);
+	if (buf[0]) {
+		head->read_buf = buf;
+		head->r.w[head->r.w_pos++] = buf;
+		head->r.query_index++;
+	} else {
+		kfree(buf);
+	}
+}
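+
+/*
+ * Illustrative record returned by tomoyo_read_query() (the serial number,
+ * retry count and message body are hypothetical):
+ *
+ *   Q10-0
+ *   <kernel> /usr/sbin/httpd
+ *   file read /etc/shadow
+ *
+ * i.e. "Q<serial>-<retry>\n" followed by the query message built by
+ * tomoyo_init_log().
+ */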
+
+/**
+ * tomoyo_write_answer - Write the supervisor's decision.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
+{
+	char *data = head->write_buf;
+	struct list_head *tmp;
+	unsigned int serial;
+	unsigned int answer;
+	spin_lock(&tomoyo_query_list_lock);
+	list_for_each(tmp, &tomoyo_query_list) {
+		struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+		ptr->timer = 0;
+	}
+	spin_unlock(&tomoyo_query_list_lock);
+	if (sscanf(data, "A%u=%u", &serial, &answer) != 2)
+		return -EINVAL;
+	spin_lock(&tomoyo_query_list_lock);
+	list_for_each(tmp, &tomoyo_query_list) {
+		struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+		if (ptr->serial != serial)
+			continue;
+		ptr->answer = answer;
+		/* Remove from tomoyo_query_list. */
+		if (ptr->answer)
+			list_del_init(&ptr->list);
+		break;
+	}
+	spin_unlock(&tomoyo_query_list_lock);
+	return 0;
+}
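+
+/*
+ * Illustrative answers written to /sys/kernel/security/tomoyo/query (the
+ * serial number is hypothetical): "A10=1" grants query 10, "A10=3" asks
+ * tomoyo_supervisor() to retry, and any other non-zero answer (e.g.
+ * "A10=2") rejects the request.
+ */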
+
+/**
+ * tomoyo_read_version - Get version.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns version information.
+ */
+static void tomoyo_read_version(struct tomoyo_io_buffer *head)
+{
+	if (!head->r.eof) {
+		tomoyo_io_printf(head, "2.5.0");
+		head->r.eof = true;
+	}
+}
+
+/* String table for /sys/kernel/security/tomoyo/stat interface. */
+static const char * const tomoyo_policy_headers[TOMOYO_MAX_POLICY_STAT] = {
+	[TOMOYO_STAT_POLICY_UPDATES]    = "update:",
+	[TOMOYO_STAT_POLICY_LEARNING]   = "violation in learning mode:",
+	[TOMOYO_STAT_POLICY_PERMISSIVE] = "violation in permissive mode:",
+	[TOMOYO_STAT_POLICY_ENFORCING]  = "violation in enforcing mode:",
+};
+
+/* String table for /sys/kernel/security/tomoyo/stat interface. */
+static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = {
+	[TOMOYO_MEMORY_POLICY] = "policy:",
+	[TOMOYO_MEMORY_AUDIT]  = "audit log:",
+	[TOMOYO_MEMORY_QUERY]  = "query message:",
+};
+
+/* Timestamp counter for last updated. */
+static unsigned int tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT];
+/* Counter for number of updates. */
+static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
+
+/**
+ * tomoyo_update_stat - Update statistic counters.
+ *
+ * @index: Index for policy type.
+ *
+ * Returns nothing.
+ */
+void tomoyo_update_stat(const u8 index)
+{
+	/*
+	 * Atomic operations are not used here because a racy update of
+	 * these counters is not fatal.
+	 */
+	tomoyo_stat_updated[index]++;
+	tomoyo_stat_modified[index] = get_seconds();
+}
+
+/**
+ * tomoyo_read_stat - Read statistic data.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
+{
+	u8 i;
+	unsigned int total = 0;
+	if (head->r.eof)
+		return;
+	for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) {
+		tomoyo_io_printf(head, "Policy %-30s %10u",
+				 tomoyo_policy_headers[i],
+				 tomoyo_stat_updated[i]);
+		if (tomoyo_stat_modified[i]) {
+			struct tomoyo_time stamp;
+			tomoyo_convert_time(tomoyo_stat_modified[i], &stamp);
+			tomoyo_io_printf(head, " (Last: %04u/%02u/%02u "
+					 "%02u:%02u:%02u)",
+					 stamp.year, stamp.month, stamp.day,
+					 stamp.hour, stamp.min, stamp.sec);
+		}
+		tomoyo_set_lf(head);
+	}
+	for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) {
+		unsigned int used = tomoyo_memory_used[i];
+		total += used;
+		tomoyo_io_printf(head, "Memory used by %-22s %10u",
+				 tomoyo_memory_headers[i], used);
+		used = tomoyo_memory_quota[i];
+		if (used)
+			tomoyo_io_printf(head, " (Quota: %10u)", used);
+		tomoyo_set_lf(head);
+	}
+	tomoyo_io_printf(head, "Total memory used:                    %10u\n",
+			 total);
+	head->r.eof = true;
+}
+
+/**
+ * tomoyo_write_stat - Set memory quota.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0.
+ */
+static int tomoyo_write_stat(struct tomoyo_io_buffer *head)
+{
+	char *data = head->write_buf;
+	u8 i;
+	if (tomoyo_str_starts(&data, "Memory used by "))
+		for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++)
+			if (tomoyo_str_starts(&data, tomoyo_memory_headers[i]))
+				sscanf(data, "%u", &tomoyo_memory_quota[i]);
+	return 0;
+}
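+
+/*
+ * Illustrative quota update handled by tomoyo_write_stat() (the number is
+ * hypothetical):
+ *
+ *   # echo 'Memory used by policy: 2097152' > /sys/kernel/security/tomoyo/stat
+ *
+ * sets tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] to 2097152 bytes.
+ */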
+
+/**
+ * tomoyo_open_control - open() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @type: Type of interface.
+ * @file: Pointer to "struct file".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_open_control(const u8 type, struct file *file)
+{
+	struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS);
+
+	if (!head)
+		return -ENOMEM;
+	mutex_init(&head->io_sem);
+	head->type = type;
+	switch (type) {
+	case TOMOYO_DOMAINPOLICY:
+		/* /sys/kernel/security/tomoyo/domain_policy */
+		head->write = tomoyo_write_domain;
+		head->read = tomoyo_read_domain;
+		break;
+	case TOMOYO_EXCEPTIONPOLICY:
+		/* /sys/kernel/security/tomoyo/exception_policy */
+		head->write = tomoyo_write_exception;
+		head->read = tomoyo_read_exception;
+		break;
+	case TOMOYO_AUDIT:
+		/* /sys/kernel/security/tomoyo/audit */
+		head->poll = tomoyo_poll_log;
+		head->read = tomoyo_read_log;
+		break;
+	case TOMOYO_PROCESS_STATUS:
+		/* /sys/kernel/security/tomoyo/.process_status */
+		head->write = tomoyo_write_pid;
+		head->read = tomoyo_read_pid;
+		break;
+	case TOMOYO_VERSION:
+		/* /sys/kernel/security/tomoyo/version */
+		head->read = tomoyo_read_version;
+		head->readbuf_size = 128;
+		break;
+	case TOMOYO_STAT:
+		/* /sys/kernel/security/tomoyo/stat */
+		head->write = tomoyo_write_stat;
+		head->read = tomoyo_read_stat;
+		head->readbuf_size = 1024;
+		break;
+	case TOMOYO_PROFILE:
+		/* /sys/kernel/security/tomoyo/profile */
+		head->write = tomoyo_write_profile;
+		head->read = tomoyo_read_profile;
+		break;
+	case TOMOYO_QUERY: /* /sys/kernel/security/tomoyo/query */
+		head->poll = tomoyo_poll_query;
+		head->write = tomoyo_write_answer;
+		head->read = tomoyo_read_query;
+		break;
+	case TOMOYO_MANAGER:
+		/* /sys/kernel/security/tomoyo/manager */
+		head->write = tomoyo_write_manager;
+		head->read = tomoyo_read_manager;
+		break;
+	}
+	if (!(file->f_mode & FMODE_READ)) {
+		/*
+		 * No need to allocate read_buf since it is not opened
+		 * for reading.
+		 */
+		head->read = NULL;
+		head->poll = NULL;
+	} else if (!head->poll) {
+		/* Don't allocate read_buf for poll() access. */
+		if (!head->readbuf_size)
+			head->readbuf_size = 4096 * 2;
+		head->read_buf = kzalloc(head->readbuf_size, GFP_NOFS);
+		if (!head->read_buf) {
+			kfree(head);
+			return -ENOMEM;
+		}
+	}
+	if (!(file->f_mode & FMODE_WRITE)) {
+		/*
+		 * No need to allocate write_buf since it is not opened
+		 * for writing.
+		 */
+		head->write = NULL;
+	} else if (head->write) {
+		head->writebuf_size = 4096 * 2;
+		head->write_buf = kzalloc(head->writebuf_size, GFP_NOFS);
+		if (!head->write_buf) {
+			kfree(head->read_buf);
+			kfree(head);
+			return -ENOMEM;
+		}
+	}
+	/*
+	 * If the file is /sys/kernel/security/tomoyo/query, increment the
+	 * observer counter.
+	 * The observer counter is used by tomoyo_supervisor() to see if
+	 * there is some process monitoring /sys/kernel/security/tomoyo/query.
+	 */
+	if (type == TOMOYO_QUERY)
+		atomic_inc(&tomoyo_query_observers);
+	file->private_data = head;
+	tomoyo_notify_gc(head, true);
+	return 0;
+}
+
+/**
+ * tomoyo_poll_control - poll() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". Maybe NULL.
+ *
+ * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
+ * POLLOUT | POLLWRNORM otherwise.
+ */
+unsigned int tomoyo_poll_control(struct file *file, poll_table *wait)
+{
+	struct tomoyo_io_buffer *head = file->private_data;
+	if (head->poll)
+		return head->poll(file, wait) | POLLOUT | POLLWRNORM;
+	return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
+}
+
+/**
+ * tomoyo_set_namespace_cursor - Set namespace to read.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head)
+{
+	struct list_head *ns;
+	if (head->type != TOMOYO_EXCEPTIONPOLICY &&
+	    head->type != TOMOYO_PROFILE)
+		return;
+	/*
+	 * If this is the first read, or reading previous namespace finished
+	 * and has more namespaces to read, update the namespace cursor.
+	 */
+	ns = head->r.ns;
+	if (!ns || (head->r.eof && ns->next != &tomoyo_namespace_list)) {
+		/* Clearing is OK because tomoyo_flush() returned true. */
+		memset(&head->r, 0, sizeof(head->r));
+		head->r.ns = ns ? ns->next : tomoyo_namespace_list.next;
+	}
+}
+
+/**
+ * tomoyo_has_more_namespace - Check for unread namespaces.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns true if we have more entries to print, false otherwise.
+ */
+static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head)
+{
+	return (head->type == TOMOYO_EXCEPTIONPOLICY ||
+		head->type == TOMOYO_PROFILE) && head->r.eof &&
+		head->r.ns->next != &tomoyo_namespace_list;
+}
+
+/**
+ * tomoyo_read_control - read() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @head:       Pointer to "struct tomoyo_io_buffer".
+ * @buffer:     Pointer to buffer to write to.
+ * @buffer_len: Size of @buffer.
+ *
+ * Returns bytes read on success, negative value otherwise.
+ */
+ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
+			    const int buffer_len)
+{
+	int len;
+	int idx;
+
+	if (!head->read)
+		return -ENOSYS;
+	if (mutex_lock_interruptible(&head->io_sem))
+		return -EINTR;
+	head->read_user_buf = buffer;
+	head->read_user_buf_avail = buffer_len;
+	idx = tomoyo_read_lock();
+	if (tomoyo_flush(head))
+		/* Call the policy handler. */
+		do {
+			tomoyo_set_namespace_cursor(head);
+			head->read(head);
+		} while (tomoyo_flush(head) &&
+			 tomoyo_has_more_namespace(head));
+	tomoyo_read_unlock(idx);
+	len = head->read_user_buf - buffer;
+	mutex_unlock(&head->io_sem);
+	return len;
+}
+
+/**
+ * tomoyo_parse_policy - Parse a policy line.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @line: Line to parse.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line)
+{
+	/* Delete request? */
+	head->w.is_delete = !strncmp(line, "delete ", 7);
+	if (head->w.is_delete)
+		memmove(line, line + 7, strlen(line + 7) + 1);
+	/* Selecting namespace to update. */
+	if (head->type == TOMOYO_EXCEPTIONPOLICY ||
+	    head->type == TOMOYO_PROFILE) {
+		if (*line == '<') {
+			char *cp = strchr(line, ' ');
+			if (cp) {
+				*cp++ = '\0';
+				head->w.ns = tomoyo_assign_namespace(line);
+				memmove(line, cp, strlen(cp) + 1);
+			} else
+				head->w.ns = NULL;
+		} else
+			head->w.ns = &tomoyo_kernel_namespace;
+		/* Don't allow updating if namespace is invalid. */
+		if (!head->w.ns)
+			return -ENOENT;
+	}
+	/* Do the update. */
+	return head->write(head);
+}
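+
+/*
+ * Illustrative lines accepted by tomoyo_parse_policy() on the exception
+ * policy interface (namespace and pathnames are hypothetical):
+ *
+ *   delete aggregator /usr/bin/foo /usr/bin/bar
+ *   <kernel> initialize_domain /usr/sbin/sshd from any
+ *
+ * A leading "delete " marks a removal request and a leading "<...>" token
+ * selects the namespace to update.
+ */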
+
+/**
+ * tomoyo_write_control - write() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @head:       Pointer to "struct tomoyo_io_buffer".
+ * @buffer:     Pointer to buffer to read from.
+ * @buffer_len: Size of @buffer.
+ *
+ * Returns @buffer_len on success, negative value otherwise.
+ */
+ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+			     const char __user *buffer, const int buffer_len)
+{
+	int error = buffer_len;
+	size_t avail_len = buffer_len;
+	char *cp0 = head->write_buf;
+	int idx;
+	if (!head->write)
+		return -ENOSYS;
+	if (!access_ok(VERIFY_READ, buffer, buffer_len))
+		return -EFAULT;
+	if (mutex_lock_interruptible(&head->io_sem))
+		return -EINTR;
+	head->read_user_buf_avail = 0;
+	idx = tomoyo_read_lock();
+	/* Read a line and dispatch it to the policy handler. */
+	while (avail_len > 0) {
+		char c;
+		if (head->w.avail >= head->writebuf_size - 1) {
+			const int len = head->writebuf_size * 2;
+			char *cp = kzalloc(len, GFP_NOFS);
+			if (!cp) {
+				error = -ENOMEM;
+				break;
+			}
+			memmove(cp, cp0, head->w.avail);
+			kfree(cp0);
+			head->write_buf = cp;
+			cp0 = cp;
+			head->writebuf_size = len;
+		}
+		if (get_user(c, buffer)) {
+			error = -EFAULT;
+			break;
+		}
+		buffer++;
+		avail_len--;
+		cp0[head->w.avail++] = c;
+		if (c != '\n')
+			continue;
+		cp0[head->w.avail - 1] = '\0';
+		head->w.avail = 0;
+		tomoyo_normalize_line(cp0);
+		if (!strcmp(cp0, "reset")) {
+			head->w.ns = &tomoyo_kernel_namespace;
+			head->w.domain = NULL;
+			memset(&head->r, 0, sizeof(head->r));
+			continue;
+		}
+		/* Don't allow updating policies by non-manager programs. */
+		switch (head->type) {
+		case TOMOYO_PROCESS_STATUS:
+			/* This does not write anything. */
+			break;
+		case TOMOYO_DOMAINPOLICY:
+			if (tomoyo_select_domain(head, cp0))
+				continue;
+			/* fall through */
+		case TOMOYO_EXCEPTIONPOLICY:
+			if (!strcmp(cp0, "select transition_only")) {
+				head->r.print_transition_related_only = true;
+				continue;
+			}
+			/* fall through */
+		default:
+			if (!tomoyo_manager()) {
+				error = -EPERM;
+				goto out;
+			}
+		}
+		switch (tomoyo_parse_policy(head, cp0)) {
+		case -EPERM:
+			error = -EPERM;
+			goto out;
+		case 0:
+			switch (head->type) {
+			case TOMOYO_DOMAINPOLICY:
+			case TOMOYO_EXCEPTIONPOLICY:
+			case TOMOYO_STAT:
+			case TOMOYO_PROFILE:
+			case TOMOYO_MANAGER:
+				tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+				break;
+			default:
+				break;
+			}
+			break;
+		}
+	}
+out:
+	tomoyo_read_unlock(idx);
+	mutex_unlock(&head->io_sem);
+	return error;
+}
+
+/**
+ * tomoyo_close_control - close() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ */
+void tomoyo_close_control(struct tomoyo_io_buffer *head)
+{
+	/*
+	 * If the file is /sys/kernel/security/tomoyo/query, decrement the
+	 * observer counter.
+	 */
+	if (head->type == TOMOYO_QUERY &&
+	    atomic_dec_and_test(&tomoyo_query_observers))
+		wake_up_all(&tomoyo_answer_wait);
+	tomoyo_notify_gc(head, false);
+}
+
+/**
+ * tomoyo_check_profile - Check all profiles currently assigned to domains are defined.
+ */
+void tomoyo_check_profile(void)
+{
+	struct tomoyo_domain_info *domain;
+	const int idx = tomoyo_read_lock();
+	tomoyo_policy_loaded = true;
+	printk(KERN_INFO "TOMOYO: 2.5.0\n");
+	list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+		const u8 profile = domain->profile;
+		const struct tomoyo_policy_namespace *ns = domain->ns;
+		if (ns->profile_version != 20110903)
+			printk(KERN_ERR
+			       "Profile version %u is not supported.\n",
+			       ns->profile_version);
+		else if (!ns->profile_ptr[profile])
+			printk(KERN_ERR
+			       "Profile %u (used by '%s') is not defined.\n",
+			       profile, domain->domainname->name);
+		else
+			continue;
+		printk(KERN_ERR
+		       "Userland tools for TOMOYO 2.5 must be installed and "
+		       "policy must be initialized.\n");
+		printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.5/ "
+		       "for more information.\n");
+		panic("STOP!");
+	}
+	tomoyo_read_unlock(idx);
+	printk(KERN_INFO "Mandatory Access Control activated.\n");
+}
+
+/**
+ * tomoyo_load_builtin_policy - Load built-in policy.
+ *
+ * Returns nothing.
+ */
+void __init tomoyo_load_builtin_policy(void)
+{
+	/*
+	 * This include file is manually created and contains built-in policy
+	 * named "tomoyo_builtin_profile", "tomoyo_builtin_exception_policy",
+	 * "tomoyo_builtin_domain_policy", "tomoyo_builtin_manager",
+	 * "tomoyo_builtin_stat" in the form of "static char [] __initdata".
+	 */
+#include "builtin-policy.h"
+	u8 i;
+	const int idx = tomoyo_read_lock();
+	for (i = 0; i < 5; i++) {
+		struct tomoyo_io_buffer head = { };
+		char *start = "";
+		switch (i) {
+		case 0:
+			start = tomoyo_builtin_profile;
+			head.type = TOMOYO_PROFILE;
+			head.write = tomoyo_write_profile;
+			break;
+		case 1:
+			start = tomoyo_builtin_exception_policy;
+			head.type = TOMOYO_EXCEPTIONPOLICY;
+			head.write = tomoyo_write_exception;
+			break;
+		case 2:
+			start = tomoyo_builtin_domain_policy;
+			head.type = TOMOYO_DOMAINPOLICY;
+			head.write = tomoyo_write_domain;
+			break;
+		case 3:
+			start = tomoyo_builtin_manager;
+			head.type = TOMOYO_MANAGER;
+			head.write = tomoyo_write_manager;
+			break;
+		case 4:
+			start = tomoyo_builtin_stat;
+			head.type = TOMOYO_STAT;
+			head.write = tomoyo_write_stat;
+			break;
+		}
+		while (1) {
+			char *end = strchr(start, '\n');
+			if (!end)
+				break;
+			*end = '\0';
+			tomoyo_normalize_line(start);
+			head.write_buf = start;
+			tomoyo_parse_policy(&head, start);
+			start = end + 1;
+		}
+	}
+	tomoyo_read_unlock(idx);
+#ifdef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+	tomoyo_check_profile();
+#endif
+}
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
new file mode 100644
index 0000000..f9c9fb1
--- /dev/null
+++ b/security/tomoyo/common.h
@@ -0,0 +1,1330 @@
+/*
+ * security/tomoyo/common.h
+ *
+ * Header file for TOMOYO.
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#ifndef _SECURITY_TOMOYO_COMMON_H
+#define _SECURITY_TOMOYO_COMMON_H
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/kmod.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/list.h>
+#include <linux/cred.h>
+#include <linux/poll.h>
+#include <linux/binfmts.h>
+#include <linux/highmem.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/un.h>
+#include <net/sock.h>
+#include <net/af_unix.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+/********** Constants definitions. **********/
+
+/*
+ * TOMOYO uses this hash only when appending a string into the string
+ * table. Frequency of appending strings is very low, so we don't need a
+ * large (e.g. 64k) hash size; 256 will be sufficient.
+ */
+#define TOMOYO_HASH_BITS  8
+#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
+
+/*
+ * TOMOYO checks only SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET.
+ * Therefore, we don't need SOCK_MAX.
+ */
+#define TOMOYO_SOCK_MAX 6
+
+#define TOMOYO_EXEC_TMPSIZE     4096
+
+/* Garbage collector is trying to kfree() this element. */
+#define TOMOYO_GC_IN_PROGRESS -1
+
+/* Profile number is an integer between 0 and 255. */
+#define TOMOYO_MAX_PROFILES 256
+
+/* Group number is an integer between 0 and 255. */
+#define TOMOYO_MAX_ACL_GROUPS 256
+
+/* Index numbers for "struct tomoyo_condition". */
+enum tomoyo_conditions_index {
+	TOMOYO_TASK_UID,             /* current_uid()   */
+	TOMOYO_TASK_EUID,            /* current_euid()  */
+	TOMOYO_TASK_SUID,            /* current_suid()  */
+	TOMOYO_TASK_FSUID,           /* current_fsuid() */
+	TOMOYO_TASK_GID,             /* current_gid()   */
+	TOMOYO_TASK_EGID,            /* current_egid()  */
+	TOMOYO_TASK_SGID,            /* current_sgid()  */
+	TOMOYO_TASK_FSGID,           /* current_fsgid() */
+	TOMOYO_TASK_PID,             /* sys_getpid()   */
+	TOMOYO_TASK_PPID,            /* sys_getppid()  */
+	TOMOYO_EXEC_ARGC,            /* "struct linux_binprm *"->argc */
+	TOMOYO_EXEC_ENVC,            /* "struct linux_binprm *"->envc */
+	TOMOYO_TYPE_IS_SOCKET,       /* S_IFSOCK */
+	TOMOYO_TYPE_IS_SYMLINK,      /* S_IFLNK */
+	TOMOYO_TYPE_IS_FILE,         /* S_IFREG */
+	TOMOYO_TYPE_IS_BLOCK_DEV,    /* S_IFBLK */
+	TOMOYO_TYPE_IS_DIRECTORY,    /* S_IFDIR */
+	TOMOYO_TYPE_IS_CHAR_DEV,     /* S_IFCHR */
+	TOMOYO_TYPE_IS_FIFO,         /* S_IFIFO */
+	TOMOYO_MODE_SETUID,          /* S_ISUID */
+	TOMOYO_MODE_SETGID,          /* S_ISGID */
+	TOMOYO_MODE_STICKY,          /* S_ISVTX */
+	TOMOYO_MODE_OWNER_READ,      /* S_IRUSR */
+	TOMOYO_MODE_OWNER_WRITE,     /* S_IWUSR */
+	TOMOYO_MODE_OWNER_EXECUTE,   /* S_IXUSR */
+	TOMOYO_MODE_GROUP_READ,      /* S_IRGRP */
+	TOMOYO_MODE_GROUP_WRITE,     /* S_IWGRP */
+	TOMOYO_MODE_GROUP_EXECUTE,   /* S_IXGRP */
+	TOMOYO_MODE_OTHERS_READ,     /* S_IROTH */
+	TOMOYO_MODE_OTHERS_WRITE,    /* S_IWOTH */
+	TOMOYO_MODE_OTHERS_EXECUTE,  /* S_IXOTH */
+	TOMOYO_EXEC_REALPATH,
+	TOMOYO_SYMLINK_TARGET,
+	TOMOYO_PATH1_UID,
+	TOMOYO_PATH1_GID,
+	TOMOYO_PATH1_INO,
+	TOMOYO_PATH1_MAJOR,
+	TOMOYO_PATH1_MINOR,
+	TOMOYO_PATH1_PERM,
+	TOMOYO_PATH1_TYPE,
+	TOMOYO_PATH1_DEV_MAJOR,
+	TOMOYO_PATH1_DEV_MINOR,
+	TOMOYO_PATH2_UID,
+	TOMOYO_PATH2_GID,
+	TOMOYO_PATH2_INO,
+	TOMOYO_PATH2_MAJOR,
+	TOMOYO_PATH2_MINOR,
+	TOMOYO_PATH2_PERM,
+	TOMOYO_PATH2_TYPE,
+	TOMOYO_PATH2_DEV_MAJOR,
+	TOMOYO_PATH2_DEV_MINOR,
+	TOMOYO_PATH1_PARENT_UID,
+	TOMOYO_PATH1_PARENT_GID,
+	TOMOYO_PATH1_PARENT_INO,
+	TOMOYO_PATH1_PARENT_PERM,
+	TOMOYO_PATH2_PARENT_UID,
+	TOMOYO_PATH2_PARENT_GID,
+	TOMOYO_PATH2_PARENT_INO,
+	TOMOYO_PATH2_PARENT_PERM,
+	TOMOYO_MAX_CONDITION_KEYWORD,
+	TOMOYO_NUMBER_UNION,
+	TOMOYO_NAME_UNION,
+	TOMOYO_ARGV_ENTRY,
+	TOMOYO_ENVP_ENTRY,
+};
+
+/* Index numbers for stat(). */
+enum tomoyo_path_stat_index {
+	/* Do not change this order. */
+	TOMOYO_PATH1,
+	TOMOYO_PATH1_PARENT,
+	TOMOYO_PATH2,
+	TOMOYO_PATH2_PARENT,
+	TOMOYO_MAX_PATH_STAT
+};
+
+/* Index numbers for operation mode. */
+enum tomoyo_mode_index {
+	TOMOYO_CONFIG_DISABLED,
+	TOMOYO_CONFIG_LEARNING,
+	TOMOYO_CONFIG_PERMISSIVE,
+	TOMOYO_CONFIG_ENFORCING,
+	TOMOYO_CONFIG_MAX_MODE,
+	TOMOYO_CONFIG_WANT_REJECT_LOG =  64,
+	TOMOYO_CONFIG_WANT_GRANT_LOG  = 128,
+	TOMOYO_CONFIG_USE_DEFAULT     = 255,
+};
+
+/* Index numbers for entry type. */
+enum tomoyo_policy_id {
+	TOMOYO_ID_GROUP,
+	TOMOYO_ID_ADDRESS_GROUP,
+	TOMOYO_ID_PATH_GROUP,
+	TOMOYO_ID_NUMBER_GROUP,
+	TOMOYO_ID_TRANSITION_CONTROL,
+	TOMOYO_ID_AGGREGATOR,
+	TOMOYO_ID_MANAGER,
+	TOMOYO_ID_CONDITION,
+	TOMOYO_ID_NAME,
+	TOMOYO_ID_ACL,
+	TOMOYO_ID_DOMAIN,
+	TOMOYO_MAX_POLICY
+};
+
+/* Index numbers for domain's attributes. */
+enum tomoyo_domain_info_flags_index {
+	/* Quota warning flag. */
+	TOMOYO_DIF_QUOTA_WARNED,
+	/*
+	 * This domain was unable to create a new domain at
+	 * tomoyo_find_next_domain() because the name of the domain to be
+	 * created was too long or it could not allocate memory.
+	 * More than one process continued execve() without domain transition.
+	 */
+	TOMOYO_DIF_TRANSITION_FAILED,
+	TOMOYO_MAX_DOMAIN_INFO_FLAGS
+};
+
+/* Index numbers for audit type. */
+enum tomoyo_grant_log {
+	/* Follow profile's configuration. */
+	TOMOYO_GRANTLOG_AUTO,
+	/* Do not generate grant log. */
+	TOMOYO_GRANTLOG_NO,
+	/* Generate grant log. */
+	TOMOYO_GRANTLOG_YES,
+};
+
+/* Index numbers for group entries. */
+enum tomoyo_group_id {
+	TOMOYO_PATH_GROUP,
+	TOMOYO_NUMBER_GROUP,
+	TOMOYO_ADDRESS_GROUP,
+	TOMOYO_MAX_GROUP
+};
+
+/* Index numbers for type of numeric values. */
+enum tomoyo_value_type {
+	TOMOYO_VALUE_TYPE_INVALID,
+	TOMOYO_VALUE_TYPE_DECIMAL,
+	TOMOYO_VALUE_TYPE_OCTAL,
+	TOMOYO_VALUE_TYPE_HEXADECIMAL,
+};
+
+/* Index numbers for domain transition control keywords. */
+enum tomoyo_transition_type {
+	/* Do not change this order. */
+	TOMOYO_TRANSITION_CONTROL_NO_RESET,
+	TOMOYO_TRANSITION_CONTROL_RESET,
+	TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE,
+	TOMOYO_TRANSITION_CONTROL_INITIALIZE,
+	TOMOYO_TRANSITION_CONTROL_NO_KEEP,
+	TOMOYO_TRANSITION_CONTROL_KEEP,
+	TOMOYO_MAX_TRANSITION_TYPE
+};
+
+/* Index numbers for Access Controls. */
+enum tomoyo_acl_entry_type_index {
+	TOMOYO_TYPE_PATH_ACL,
+	TOMOYO_TYPE_PATH2_ACL,
+	TOMOYO_TYPE_PATH_NUMBER_ACL,
+	TOMOYO_TYPE_MKDEV_ACL,
+	TOMOYO_TYPE_MOUNT_ACL,
+	TOMOYO_TYPE_INET_ACL,
+	TOMOYO_TYPE_UNIX_ACL,
+	TOMOYO_TYPE_ENV_ACL,
+	TOMOYO_TYPE_MANUAL_TASK_ACL,
+};
+
+/* Index numbers for access controls with one pathname. */
+enum tomoyo_path_acl_index {
+	TOMOYO_TYPE_EXECUTE,
+	TOMOYO_TYPE_READ,
+	TOMOYO_TYPE_WRITE,
+	TOMOYO_TYPE_APPEND,
+	TOMOYO_TYPE_UNLINK,
+	TOMOYO_TYPE_GETATTR,
+	TOMOYO_TYPE_RMDIR,
+	TOMOYO_TYPE_TRUNCATE,
+	TOMOYO_TYPE_SYMLINK,
+	TOMOYO_TYPE_CHROOT,
+	TOMOYO_TYPE_UMOUNT,
+	TOMOYO_MAX_PATH_OPERATION
+};
+
+/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */
+enum tomoyo_memory_stat_type {
+	TOMOYO_MEMORY_POLICY,
+	TOMOYO_MEMORY_AUDIT,
+	TOMOYO_MEMORY_QUERY,
+	TOMOYO_MAX_MEMORY_STAT
+};
+
+enum tomoyo_mkdev_acl_index {
+	TOMOYO_TYPE_MKBLOCK,
+	TOMOYO_TYPE_MKCHAR,
+	TOMOYO_MAX_MKDEV_OPERATION
+};
+
+/* Index numbers for socket operations. */
+enum tomoyo_network_acl_index {
+	TOMOYO_NETWORK_BIND,    /* bind() operation. */
+	TOMOYO_NETWORK_LISTEN,  /* listen() operation. */
+	TOMOYO_NETWORK_CONNECT, /* connect() operation. */
+	TOMOYO_NETWORK_SEND,    /* send() operation. */
+	TOMOYO_MAX_NETWORK_OPERATION
+};
+
+/* Index numbers for access controls with two pathnames. */
+enum tomoyo_path2_acl_index {
+	TOMOYO_TYPE_LINK,
+	TOMOYO_TYPE_RENAME,
+	TOMOYO_TYPE_PIVOT_ROOT,
+	TOMOYO_MAX_PATH2_OPERATION
+};
+
+/* Index numbers for access controls with one pathname and one number. */
+enum tomoyo_path_number_acl_index {
+	TOMOYO_TYPE_CREATE,
+	TOMOYO_TYPE_MKDIR,
+	TOMOYO_TYPE_MKFIFO,
+	TOMOYO_TYPE_MKSOCK,
+	TOMOYO_TYPE_IOCTL,
+	TOMOYO_TYPE_CHMOD,
+	TOMOYO_TYPE_CHOWN,
+	TOMOYO_TYPE_CHGRP,
+	TOMOYO_MAX_PATH_NUMBER_OPERATION
+};
+
+/* Index numbers for /sys/kernel/security/tomoyo/ interfaces. */
+enum tomoyo_securityfs_interface_index {
+	TOMOYO_DOMAINPOLICY,
+	TOMOYO_EXCEPTIONPOLICY,
+	TOMOYO_PROCESS_STATUS,
+	TOMOYO_STAT,
+	TOMOYO_AUDIT,
+	TOMOYO_VERSION,
+	TOMOYO_PROFILE,
+	TOMOYO_QUERY,
+	TOMOYO_MANAGER
+};
+
+/* Index numbers for special mount operations. */
+enum tomoyo_special_mount {
+	TOMOYO_MOUNT_BIND,            /* mount --bind /source /dest   */
+	TOMOYO_MOUNT_MOVE,            /* mount --move /old /new       */
+	TOMOYO_MOUNT_REMOUNT,         /* mount -o remount /dir        */
+	TOMOYO_MOUNT_MAKE_UNBINDABLE, /* mount --make-unbindable /dir */
+	TOMOYO_MOUNT_MAKE_PRIVATE,    /* mount --make-private /dir    */
+	TOMOYO_MOUNT_MAKE_SLAVE,      /* mount --make-slave /dir      */
+	TOMOYO_MOUNT_MAKE_SHARED,     /* mount --make-shared /dir     */
+	TOMOYO_MAX_SPECIAL_MOUNT
+};
+
+/* Index numbers for functionality. */
+enum tomoyo_mac_index {
+	TOMOYO_MAC_FILE_EXECUTE,
+	TOMOYO_MAC_FILE_OPEN,
+	TOMOYO_MAC_FILE_CREATE,
+	TOMOYO_MAC_FILE_UNLINK,
+	TOMOYO_MAC_FILE_GETATTR,
+	TOMOYO_MAC_FILE_MKDIR,
+	TOMOYO_MAC_FILE_RMDIR,
+	TOMOYO_MAC_FILE_MKFIFO,
+	TOMOYO_MAC_FILE_MKSOCK,
+	TOMOYO_MAC_FILE_TRUNCATE,
+	TOMOYO_MAC_FILE_SYMLINK,
+	TOMOYO_MAC_FILE_MKBLOCK,
+	TOMOYO_MAC_FILE_MKCHAR,
+	TOMOYO_MAC_FILE_LINK,
+	TOMOYO_MAC_FILE_RENAME,
+	TOMOYO_MAC_FILE_CHMOD,
+	TOMOYO_MAC_FILE_CHOWN,
+	TOMOYO_MAC_FILE_CHGRP,
+	TOMOYO_MAC_FILE_IOCTL,
+	TOMOYO_MAC_FILE_CHROOT,
+	TOMOYO_MAC_FILE_MOUNT,
+	TOMOYO_MAC_FILE_UMOUNT,
+	TOMOYO_MAC_FILE_PIVOT_ROOT,
+	TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+	TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+	TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+	TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+	TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+	TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+	TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+	TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+	TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+	TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+	TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+	TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+	TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+	TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+	TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+	TOMOYO_MAC_ENVIRON,
+	TOMOYO_MAX_MAC_INDEX
+};
+
+/* Index numbers for category of functionality. */
+enum tomoyo_mac_category_index {
+	TOMOYO_MAC_CATEGORY_FILE,
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	TOMOYO_MAC_CATEGORY_MISC,
+	TOMOYO_MAX_MAC_CATEGORY_INDEX
+};
+
+/*
+ * Retry this request. Returned by tomoyo_supervisor() if policy violation has
+ * occurred in enforcing mode and the userspace daemon decided to retry.
+ *
+ * We must choose a positive value in order to distinguish "granted" (which is
+ * 0) and "rejected" (which is a negative value) and "retry".
+ */
+#define TOMOYO_RETRY_REQUEST 1
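+
+/*
+ * Sketch of the caller pattern this value enables (mirrors how callers in
+ * this module consume tomoyo_supervisor()'s return value):
+ *
+ *	retry:
+ *		error = tomoyo_supervisor(&r, "%s", buffer);
+ *		if (error == TOMOYO_RETRY_REQUEST)
+ *			goto retry;
+ *
+ * where 0 means "granted" and a negative value means "rejected".
+ */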
+
+/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */
+enum tomoyo_policy_stat_type {
+	/* Do not change this order. */
+	TOMOYO_STAT_POLICY_UPDATES,
+	TOMOYO_STAT_POLICY_LEARNING,   /* == TOMOYO_CONFIG_LEARNING */
+	TOMOYO_STAT_POLICY_PERMISSIVE, /* == TOMOYO_CONFIG_PERMISSIVE */
+	TOMOYO_STAT_POLICY_ENFORCING,  /* == TOMOYO_CONFIG_ENFORCING */
+	TOMOYO_MAX_POLICY_STAT
+};
+
+/* Index numbers for profile's PREFERENCE values. */
+enum tomoyo_pref_index {
+	TOMOYO_PREF_MAX_AUDIT_LOG,
+	TOMOYO_PREF_MAX_LEARNING_ENTRY,
+	TOMOYO_MAX_PREF
+};
+
+/********** Structure definitions. **********/
+
+/* Common header for holding ACL entries. */
+struct tomoyo_acl_head {
+	struct list_head list;
+	s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
+} __packed;
+
+/* Common header for shared entries. */
+struct tomoyo_shared_acl_head {
+	struct list_head list;
+	atomic_t users;
+} __packed;
+
+struct tomoyo_policy_namespace;
+
+/* Structure for request info. */
+struct tomoyo_request_info {
+	/*
+	 * For holding parameters specific to operations which deal with
+	 * files. NULL if not dealing with files.
+	 */
+	struct tomoyo_obj_info *obj;
+	/*
+	 * For holding parameters specific to execve() request.
+	 * NULL if not dealing with do_execve().
+	 */
+	struct tomoyo_execve *ee;
+	struct tomoyo_domain_info *domain;
+	/* For holding parameters. */
+	union {
+		struct {
+			const struct tomoyo_path_info *filename;
+			/* For using wildcards at tomoyo_find_next_domain(). */
+			const struct tomoyo_path_info *matched_path;
+			/* One of values in "enum tomoyo_path_acl_index". */
+			u8 operation;
+		} path;
+		struct {
+			const struct tomoyo_path_info *filename1;
+			const struct tomoyo_path_info *filename2;
+			/* One of values in "enum tomoyo_path2_acl_index". */
+			u8 operation;
+		} path2;
+		struct {
+			const struct tomoyo_path_info *filename;
+			unsigned int mode;
+			unsigned int major;
+			unsigned int minor;
+			/* One of values in "enum tomoyo_mkdev_acl_index". */
+			u8 operation;
+		} mkdev;
+		struct {
+			const struct tomoyo_path_info *filename;
+			unsigned long number;
+			/*
+			 * One of values in
+			 * "enum tomoyo_path_number_acl_index".
+			 */
+			u8 operation;
+		} path_number;
+		struct {
+			const struct tomoyo_path_info *name;
+		} environ;
+		struct {
+			const __be32 *address;
+			u16 port;
+			/* One of values smaller than TOMOYO_SOCK_MAX. */
+			u8 protocol;
+			/* One of values in "enum tomoyo_network_acl_index". */
+			u8 operation;
+			bool is_ipv6;
+		} inet_network;
+		struct {
+			const struct tomoyo_path_info *address;
+			/* One of values smaller than TOMOYO_SOCK_MAX. */
+			u8 protocol;
+			/* One of values in "enum tomoyo_network_acl_index". */
+			u8 operation;
+		} unix_network;
+		struct {
+			const struct tomoyo_path_info *type;
+			const struct tomoyo_path_info *dir;
+			const struct tomoyo_path_info *dev;
+			unsigned long flags;
+			int need_dev;
+		} mount;
+		struct {
+			const struct tomoyo_path_info *domainname;
+		} task;
+	} param;
+	struct tomoyo_acl_info *matched_acl;
+	/* One of values in "enum tomoyo_acl_entry_type_index". */
+	u8 param_type;
+	bool granted;
+	u8 retry;
+	u8 profile;
+	u8 mode; /* One of values in "enum tomoyo_mode_index". */
+	u8 type; /* One of values in "enum tomoyo_mac_index". */
+};
+
+/* Structure for holding a token. */
+struct tomoyo_path_info {
+	const char *name;
+	u32 hash;          /* = full_name_hash(name, strlen(name)) */
+	u16 const_len;     /* = tomoyo_const_part_length(name)     */
+	bool is_dir;       /* = tomoyo_strendswith(name, "/")      */
+	bool is_patterned; /* = tomoyo_path_contains_pattern(name) */
+};
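+
+/*
+ * Minimal usage sketch: the derived fields are computed by
+ * tomoyo_fill_path_info() (see e.g. tomoyo_argv() in condition.c):
+ *
+ *	struct tomoyo_path_info arg;
+ *
+ *	arg.name = "/usr/sbin/httpd";
+ *	tomoyo_fill_path_info(&arg);
+ */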
+
+/* Structure for holding string data. */
+struct tomoyo_name {
+	struct tomoyo_shared_acl_head head;
+	struct tomoyo_path_info entry;
+};
+
+/* Structure for holding a word. */
+struct tomoyo_name_union {
+	/* Either @filename or @group is NULL. */
+	const struct tomoyo_path_info *filename;
+	struct tomoyo_group *group;
+};
+
+/* Structure for holding a number. */
+struct tomoyo_number_union {
+	unsigned long values[2];
+	struct tomoyo_group *group; /* Maybe NULL. */
+	/* One of values in "enum tomoyo_value_type". */
+	u8 value_type[2];
+};
+
+/* Structure for holding an IP address. */
+struct tomoyo_ipaddr_union {
+	struct in6_addr ip[2]; /* Big endian. */
+	struct tomoyo_group *group; /* Pointer to address group. */
+	bool is_ipv6; /* Valid only if @group == NULL. */
+};
+
+/* Structure for "path_group"/"number_group"/"address_group" directive. */
+struct tomoyo_group {
+	struct tomoyo_shared_acl_head head;
+	const struct tomoyo_path_info *group_name;
+	struct list_head member_list;
+};
+
+/* Structure for "path_group" directive. */
+struct tomoyo_path_group {
+	struct tomoyo_acl_head head;
+	const struct tomoyo_path_info *member_name;
+};
+
+/* Structure for "number_group" directive. */
+struct tomoyo_number_group {
+	struct tomoyo_acl_head head;
+	struct tomoyo_number_union number;
+};
+
+/* Structure for "address_group" directive. */
+struct tomoyo_address_group {
+	struct tomoyo_acl_head head;
+	/* Structure for holding an IP address. */
+	struct tomoyo_ipaddr_union address;
+};
+
+/* Subset of "struct stat". Used by conditional ACL and audit logs. */
+struct tomoyo_mini_stat {
+	kuid_t uid;
+	kgid_t gid;
+	ino_t ino;
+	umode_t mode;
+	dev_t dev;
+	dev_t rdev;
+};
+
+/* Structure for dumping argv[] and envp[] of "struct linux_binprm". */
+struct tomoyo_page_dump {
+	struct page *page;    /* Previously dumped page. */
+	char *data;           /* Contents of "page". Size is PAGE_SIZE. */
+};
+
+/* Structure for attribute checks in addition to pathname checks. */
+struct tomoyo_obj_info {
+	/*
+	 * True if tomoyo_get_attributes() was already called, false otherwise.
+	 */
+	bool validate_done;
+	/* True if @stat[] is valid. */
+	bool stat_valid[TOMOYO_MAX_PATH_STAT];
+	/* First pathname. Initialized with { NULL, NULL } if no path. */
+	struct path path1;
+	/* Second pathname. Initialized with { NULL, NULL } if no path. */
+	struct path path2;
+	/*
+	 * Information on @path1, @path1's parent directory, @path2, @path2's
+	 * parent directory.
+	 */
+	struct tomoyo_mini_stat stat[TOMOYO_MAX_PATH_STAT];
+	/*
+	 * Content of symbolic link to be created. NULL for operations other
+	 * than symlink().
+	 */
+	struct tomoyo_path_info *symlink_target;
+};
+
+/* Structure for argv[]. */
+struct tomoyo_argv {
+	unsigned long index;
+	const struct tomoyo_path_info *value;
+	bool is_not;
+};
+
+/* Structure for envp[]. */
+struct tomoyo_envp {
+	const struct tomoyo_path_info *name;
+	const struct tomoyo_path_info *value;
+	bool is_not;
+};
+
+/* Structure for execve() operation. */
+struct tomoyo_execve {
+	struct tomoyo_request_info r;
+	struct tomoyo_obj_info obj;
+	struct linux_binprm *bprm;
+	const struct tomoyo_path_info *transition;
+	/* For dumping argv[] and envp[]. */
+	struct tomoyo_page_dump dump;
+	/* For temporary use. */
+	char *tmp; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+};
+
+/* Structure for entries which follow "struct tomoyo_condition". */
+struct tomoyo_condition_element {
+	/*
+	 * Left hand operand. A "struct tomoyo_argv" for TOMOYO_ARGV_ENTRY or
+	 * a "struct tomoyo_envp" for TOMOYO_ENVP_ENTRY is attached to the
+	 * tail of the array of this struct.
+	 */
+	u8 left;
+	/*
+	 * Right hand operand. A "struct tomoyo_number_union" for
+	 * TOMOYO_NUMBER_UNION or a "struct tomoyo_name_union" for
+	 * TOMOYO_NAME_UNION is attached to the tail of the array of this
+	 * struct.
+	 */
+	u8 right;
+	/* Equation operator. True if equals or overlaps, false otherwise. */
+	bool equals;
+};
+
+/* Structure for optional arguments. */
+struct tomoyo_condition {
+	struct tomoyo_shared_acl_head head;
+	u32 size; /* Memory size allocated for this entry. */
+	u16 condc; /* Number of conditions in this struct. */
+	u16 numbers_count; /* Number of "struct tomoyo_number_union" values. */
+	u16 names_count; /* Number of "struct tomoyo_name_union" names. */
+	u16 argc; /* Number of "struct tomoyo_argv". */
+	u16 envc; /* Number of "struct tomoyo_envp". */
+	u8 grant_log; /* One of values in "enum tomoyo_grant_log". */
+	const struct tomoyo_path_info *transit; /* Maybe NULL. */
+	/*
+	 * struct tomoyo_condition_element condition[condc];
+	 * struct tomoyo_number_union values[numbers_count];
+	 * struct tomoyo_name_union names[names_count];
+	 * struct tomoyo_argv argv[argc];
+	 * struct tomoyo_envp envp[envc];
+	 */
+};
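+
+/*
+ * The trailing arrays are reached via pointer arithmetic, exactly as done
+ * by tomoyo_get_condition() and tomoyo_condition() in condition.c:
+ *
+ *	condp     = (struct tomoyo_condition_element *) (cond + 1);
+ *	numbers_p = (struct tomoyo_number_union *) (condp + cond->condc);
+ *	names_p   = (struct tomoyo_name_union *)
+ *		(numbers_p + cond->numbers_count);
+ */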
+
+/* Common header for individual entries. */
+struct tomoyo_acl_info {
+	struct list_head list;
+	struct tomoyo_condition *cond; /* Maybe NULL. */
+	s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
+	u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */
+} __packed;
+
+/* Structure for domain information. */
+struct tomoyo_domain_info {
+	struct list_head list;
+	struct list_head acl_info_list;
+	/* Name of this domain. Never NULL.          */
+	const struct tomoyo_path_info *domainname;
+	/* Namespace for this domain. Never NULL. */
+	struct tomoyo_policy_namespace *ns;
+	u8 profile;        /* Profile number to use. */
+	u8 group;          /* Group number to use.   */
+	bool is_deleted;   /* Delete flag.           */
+	bool flags[TOMOYO_MAX_DOMAIN_INFO_FLAGS];
+	atomic_t users; /* Number of referring credentials. */
+};
+
+/*
+ * Structure for "task manual_domain_transition" directive.
+ */
+struct tomoyo_task_acl {
+	struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */
+	/* Pointer to domainname. */
+	const struct tomoyo_path_info *domainname;
+};
+
+/*
+ * Structure for "file execute", "file read", "file write", "file append",
+ * "file unlink", "file getattr", "file rmdir", "file truncate",
+ * "file symlink", "file chroot" and "file unmount" directive.
+ */
+struct tomoyo_path_acl {
+	struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */
+	u16 perm; /* Bitmask of values in "enum tomoyo_path_acl_index". */
+	struct tomoyo_name_union name;
+};
+
+/*
+ * Structure for "file create", "file mkdir", "file mkfifo", "file mksock",
+ * "file ioctl", "file chmod", "file chown" and "file chgrp" directive.
+ */
+struct tomoyo_path_number_acl {
+	struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_NUMBER_ACL */
+	/* Bitmask of values in "enum tomoyo_path_number_acl_index". */
+	u8 perm;
+	struct tomoyo_name_union name;
+	struct tomoyo_number_union number;
+};
+
+/* Structure for "file mkblock" and "file mkchar" directive. */
+struct tomoyo_mkdev_acl {
+	struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MKDEV_ACL */
+	u8 perm; /* Bitmask of values in "enum tomoyo_mkdev_acl_index". */
+	struct tomoyo_name_union name;
+	struct tomoyo_number_union mode;
+	struct tomoyo_number_union major;
+	struct tomoyo_number_union minor;
+};
+
+/*
+ * Structure for "file rename", "file link" and "file pivot_root" directive.
+ */
+struct tomoyo_path2_acl {
+	struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */
+	u8 perm; /* Bitmask of values in "enum tomoyo_path2_acl_index". */
+	struct tomoyo_name_union name1;
+	struct tomoyo_name_union name2;
+};
+
+/* Structure for "file mount" directive. */
+struct tomoyo_mount_acl {
+	struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MOUNT_ACL */
+	struct tomoyo_name_union dev_name;
+	struct tomoyo_name_union dir_name;
+	struct tomoyo_name_union fs_type;
+	struct tomoyo_number_union flags;
+};
+
+/* Structure for "misc env" directive in domain policy. */
+struct tomoyo_env_acl {
+	struct tomoyo_acl_info head;        /* type = TOMOYO_TYPE_ENV_ACL  */
+	const struct tomoyo_path_info *env; /* environment variable */
+};
+
+/* Structure for "network inet" directive. */
+struct tomoyo_inet_acl {
+	struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */
+	u8 protocol;
+	u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+	struct tomoyo_ipaddr_union address;
+	struct tomoyo_number_union port;
+};
+
+/* Structure for "network unix" directive. */
+struct tomoyo_unix_acl {
+	struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */
+	u8 protocol;
+	u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+	struct tomoyo_name_union name;
+};
+
+/* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */
+struct tomoyo_acl_param {
+	char *data;
+	struct list_head *list;
+	struct tomoyo_policy_namespace *ns;
+	bool is_delete;
+};
+
+#define TOMOYO_MAX_IO_READ_QUEUE 64
+
+/*
+ * Structure for reading/writing policy via /sys/kernel/security/tomoyo
+ * interfaces.
+ */
+struct tomoyo_io_buffer {
+	void (*read) (struct tomoyo_io_buffer *);
+	int (*write) (struct tomoyo_io_buffer *);
+	unsigned int (*poll) (struct file *file, poll_table *wait);
+	/* Exclusive lock for this structure.   */
+	struct mutex io_sem;
+	char __user *read_user_buf;
+	size_t read_user_buf_avail;
+	struct {
+		struct list_head *ns;
+		struct list_head *domain;
+		struct list_head *group;
+		struct list_head *acl;
+		size_t avail;
+		unsigned int step;
+		unsigned int query_index;
+		u16 index;
+		u16 cond_index;
+		u8 acl_group_index;
+		u8 cond_step;
+		u8 bit;
+		u8 w_pos;
+		bool eof;
+		bool print_this_domain_only;
+		bool print_transition_related_only;
+		bool print_cond_part;
+		const char *w[TOMOYO_MAX_IO_READ_QUEUE];
+	} r;
+	struct {
+		struct tomoyo_policy_namespace *ns;
+		/* The domain currently being written to. */
+		struct tomoyo_domain_info *domain;
+		/* Bytes available for writing.         */
+		size_t avail;
+		bool is_delete;
+	} w;
+	/* Buffer for reading.                  */
+	char *read_buf;
+	/* Size of read buffer.                 */
+	size_t readbuf_size;
+	/* Buffer for writing.                  */
+	char *write_buf;
+	/* Size of write buffer.                */
+	size_t writebuf_size;
+	/* Type of this interface.              */
+	enum tomoyo_securityfs_interface_index type;
+	/* Users counter protected by tomoyo_io_buffer_list_lock. */
+	u8 users;
+	/* List for telling GC not to kfree() elements. */
+	struct list_head list;
+};
+
+/*
+ * Structure for "initialize_domain"/"no_initialize_domain"/"keep_domain"/
+ * "no_keep_domain" keyword.
+ */
+struct tomoyo_transition_control {
+	struct tomoyo_acl_head head;
+	u8 type; /* One of values in "enum tomoyo_transition_type".  */
+	/* True if the domainname is tomoyo_get_last_name(). */
+	bool is_last_name;
+	const struct tomoyo_path_info *domainname; /* Maybe NULL */
+	const struct tomoyo_path_info *program;    /* Maybe NULL */
+};
+
+/* Structure for "aggregator" keyword. */
+struct tomoyo_aggregator {
+	struct tomoyo_acl_head head;
+	const struct tomoyo_path_info *original_name;
+	const struct tomoyo_path_info *aggregated_name;
+};
+
+/* Structure for policy manager. */
+struct tomoyo_manager {
+	struct tomoyo_acl_head head;
+	/* A path to program or a domainname. */
+	const struct tomoyo_path_info *manager;
+};
+
+struct tomoyo_preference {
+	unsigned int learning_max_entry;
+	bool enforcing_verbose;
+	bool learning_verbose;
+	bool permissive_verbose;
+};
+
+/* Structure for /sys/kernel/security/tomoyo/profile interface. */
+struct tomoyo_profile {
+	const struct tomoyo_path_info *comment;
+	struct tomoyo_preference *learning;
+	struct tomoyo_preference *permissive;
+	struct tomoyo_preference *enforcing;
+	struct tomoyo_preference preference;
+	u8 default_config;
+	u8 config[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX];
+	unsigned int pref[TOMOYO_MAX_PREF];
+};
+
+/* Structure for representing YYYY/MM/DD hh:mm:ss. */
+struct tomoyo_time {
+	u16 year;
+	u8 month;
+	u8 day;
+	u8 hour;
+	u8 min;
+	u8 sec;
+};
+
+/* Structure for policy namespace. */
+struct tomoyo_policy_namespace {
+	/* Profile table. Memory is allocated as needed. */
+	struct tomoyo_profile *profile_ptr[TOMOYO_MAX_PROFILES];
+	/* List of "struct tomoyo_group". */
+	struct list_head group_list[TOMOYO_MAX_GROUP];
+	/* List of policy. */
+	struct list_head policy_list[TOMOYO_MAX_POLICY];
+	/* The global ACL referred by "use_group" keyword. */
+	struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS];
+	/* List for connecting to tomoyo_namespace_list list. */
+	struct list_head namespace_list;
+	/* Profile version. Currently only 20110903 is defined. */
+	unsigned int profile_version;
+	/* Name of this namespace (e.g. "<kernel>", "</usr/sbin/httpd>"). */
+	const char *name;
+};
+
+/********** Function prototypes. **********/
+
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+				  const struct tomoyo_group *group);
+bool tomoyo_compare_number_union(const unsigned long value,
+				 const struct tomoyo_number_union *ptr);
+bool tomoyo_condition(struct tomoyo_request_info *r,
+		      const struct tomoyo_condition *cond);
+bool tomoyo_correct_domain(const unsigned char *domainname);
+bool tomoyo_correct_path(const char *filename);
+bool tomoyo_correct_word(const char *string);
+bool tomoyo_domain_def(const unsigned char *buffer);
+bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r);
+bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
+		      struct tomoyo_page_dump *dump);
+bool tomoyo_memory_ok(void *ptr);
+bool tomoyo_number_matches_group(const unsigned long min,
+				 const unsigned long max,
+				 const struct tomoyo_group *group);
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+			       struct tomoyo_ipaddr_union *ptr);
+bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
+			     struct tomoyo_name_union *ptr);
+bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
+			       struct tomoyo_number_union *ptr);
+bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
+				 const struct tomoyo_path_info *pattern);
+bool tomoyo_permstr(const char *string, const char *keyword);
+bool tomoyo_str_starts(char **src, const char *find);
+char *tomoyo_encode(const char *str);
+char *tomoyo_encode2(const char *str, int str_len);
+char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
+		      va_list args);
+char *tomoyo_read_token(struct tomoyo_acl_param *param);
+char *tomoyo_realpath_from_path(const struct path *path);
+char *tomoyo_realpath_nofollow(const char *pathname);
+const char *tomoyo_get_exe(void);
+const char *tomoyo_yesno(const unsigned int value);
+const struct tomoyo_path_info *tomoyo_compare_name_union
+(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr);
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param);
+const struct tomoyo_path_info *tomoyo_get_name(const char *name);
+const struct tomoyo_path_info *tomoyo_path_matches_group
+(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group);
+int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
+				 struct path *path, const int flag);
+void tomoyo_close_control(struct tomoyo_io_buffer *head);
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env);
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+			      const struct tomoyo_path_info *filename);
+int tomoyo_find_next_domain(struct linux_binprm *bprm);
+int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
+		    const u8 index);
+int tomoyo_init_request_info(struct tomoyo_request_info *r,
+			     struct tomoyo_domain_info *domain,
+			     const u8 index);
+int tomoyo_mkdev_perm(const u8 operation, struct path *path,
+		      const unsigned int mode, unsigned int dev);
+int tomoyo_mount_permission(const char *dev_name, struct path *path,
+			    const char *type, unsigned long flags,
+			    void *data_page);
+int tomoyo_open_control(const u8 type, struct file *file);
+int tomoyo_path2_perm(const u8 operation, struct path *path1,
+		      struct path *path2);
+int tomoyo_path_number_perm(const u8 operation, struct path *path,
+			    unsigned long number);
+int tomoyo_path_perm(const u8 operation, const struct path *path,
+		     const char *target);
+unsigned int tomoyo_poll_control(struct file *file, poll_table *wait);
+unsigned int tomoyo_poll_log(struct file *file, poll_table *wait);
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+				  int addr_len);
+int tomoyo_socket_connect_permission(struct socket *sock,
+				     struct sockaddr *addr, int addr_len);
+int tomoyo_socket_listen_permission(struct socket *sock);
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+				     int size);
+int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
+	__printf(2, 3);
+int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
+			 struct tomoyo_acl_param *param,
+			 bool (*check_duplicate)
+			 (const struct tomoyo_acl_info *,
+			  const struct tomoyo_acl_info *),
+			 bool (*merge_duplicate)
+			 (struct tomoyo_acl_info *, struct tomoyo_acl_info *,
+			  const bool));
+int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
+			 struct tomoyo_acl_param *param,
+			 bool (*check_duplicate)
+			 (const struct tomoyo_acl_head *,
+			  const struct tomoyo_acl_head *));
+int tomoyo_write_aggregator(struct tomoyo_acl_param *param);
+int tomoyo_write_file(struct tomoyo_acl_param *param);
+int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type);
+int tomoyo_write_misc(struct tomoyo_acl_param *param);
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param);
+int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
+				    const u8 type);
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param);
+ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
+			    const int buffer_len);
+ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+			     const char __user *buffer, const int buffer_len);
+struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param);
+struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
+						const bool transit);
+struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname);
+struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
+				      const u8 idx);
+struct tomoyo_policy_namespace *tomoyo_assign_namespace
+(const char *domainname);
+struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
+				      const u8 profile);
+unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
+				const u8 index);
+u8 tomoyo_parse_ulong(unsigned long *result, char **str);
+void *tomoyo_commit_ok(void *data, const unsigned int size);
+void __init tomoyo_load_builtin_policy(void);
+void __init tomoyo_mm_init(void);
+void tomoyo_check_acl(struct tomoyo_request_info *r,
+		      bool (*check_entry) (struct tomoyo_request_info *,
+					   const struct tomoyo_acl_info *));
+void tomoyo_check_profile(void);
+void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp);
+void tomoyo_del_condition(struct list_head *element);
+void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
+void tomoyo_get_attributes(struct tomoyo_obj_info *obj);
+void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns);
+void tomoyo_load_policy(const char *filename);
+void tomoyo_normalize_line(unsigned char *buffer);
+void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register);
+void tomoyo_print_ip(char *buf, const unsigned int size,
+		     const struct tomoyo_ipaddr_union *ptr);
+void tomoyo_print_ulong(char *buffer, const int buffer_len,
+			const unsigned long value, const u8 type);
+void tomoyo_put_name_union(struct tomoyo_name_union *ptr);
+void tomoyo_put_number_union(struct tomoyo_number_union *ptr);
+void tomoyo_read_log(struct tomoyo_io_buffer *head);
+void tomoyo_update_stat(const u8 index);
+void tomoyo_warn_oom(const char *function);
+void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
+	__printf(2, 3);
+void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
+		       va_list args);
+
+/********** External variable definitions. **********/
+
+extern bool tomoyo_policy_loaded;
+extern const char * const tomoyo_condition_keyword
+[TOMOYO_MAX_CONDITION_KEYWORD];
+extern const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS];
+extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+					      + TOMOYO_MAX_MAC_CATEGORY_INDEX];
+extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE];
+extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION];
+extern const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX];
+extern const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION];
+extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX];
+extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION];
+extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION];
+extern const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION];
+extern struct list_head tomoyo_condition_list;
+extern struct list_head tomoyo_domain_list;
+extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+extern struct list_head tomoyo_namespace_list;
+extern struct mutex tomoyo_policy_lock;
+extern struct srcu_struct tomoyo_ss;
+extern struct tomoyo_domain_info tomoyo_kernel_domain;
+extern struct tomoyo_policy_namespace tomoyo_kernel_namespace;
+extern unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
+extern unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
+
+/********** Inlined functions. **********/
+
+/**
+ * tomoyo_read_lock - Take lock for protecting policy.
+ *
+ * Returns index number for tomoyo_read_unlock().
+ */
+static inline int tomoyo_read_lock(void)
+{
+	return srcu_read_lock(&tomoyo_ss);
+}
+
+/**
+ * tomoyo_read_unlock - Release lock for protecting policy.
+ *
+ * @idx: Index number returned by tomoyo_read_lock().
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_read_unlock(int idx)
+{
+	srcu_read_unlock(&tomoyo_ss, idx);
+}
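+
+/*
+ * Typical pairing (a sketch of how the two helpers above are used
+ * throughout this module):
+ *
+ *	const int idx = tomoyo_read_lock();
+ *
+ *	... walk policy lists under SRCU protection ...
+ *	tomoyo_read_unlock(idx);
+ */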
+
+/**
+ * tomoyo_sys_getppid - Copy of getppid().
+ *
+ * Returns parent process's PID.
+ *
+ * Alpha does not have getppid() defined. To be able to build this module on
+ * Alpha, I have to copy getppid() from kernel/timer.c.
+ */
+static inline pid_t tomoyo_sys_getppid(void)
+{
+	pid_t pid;
+	rcu_read_lock();
+	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
+	rcu_read_unlock();
+	return pid;
+}
+
+/**
+ * tomoyo_sys_getpid - Copy of getpid().
+ *
+ * Returns current thread's PID.
+ *
+ * Alpha does not have getpid() defined. To be able to build this module on
+ * Alpha, I have to copy getpid() from kernel/timer.c.
+ */
+static inline pid_t tomoyo_sys_getpid(void)
+{
+	return task_tgid_vnr(current);
+}
+
+/**
+ * tomoyo_pathcmp - strcmp() for "struct tomoyo_path_info" structure.
+ *
+ * @a: Pointer to "struct tomoyo_path_info".
+ * @b: Pointer to "struct tomoyo_path_info".
+ *
+ * Returns true if @a != @b (i.e. non-zero like strcmp()), false otherwise.
+ */
+static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a,
+				  const struct tomoyo_path_info *b)
+{
+	return a->hash != b->hash || strcmp(a->name, b->name);
+}
+
+/**
+ * tomoyo_put_name - Drop reference on "struct tomoyo_name".
+ *
+ * @name: Pointer to "struct tomoyo_path_info". Maybe NULL.
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_put_name(const struct tomoyo_path_info *name)
+{
+	if (name) {
+		struct tomoyo_name *ptr =
+			container_of(name, typeof(*ptr), entry);
+		atomic_dec(&ptr->head.users);
+	}
+}
+
+/**
+ * tomoyo_put_condition - Drop reference on "struct tomoyo_condition".
+ *
+ * @cond: Pointer to "struct tomoyo_condition". Maybe NULL.
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_put_condition(struct tomoyo_condition *cond)
+{
+	if (cond)
+		atomic_dec(&cond->head.users);
+}
+
+/**
+ * tomoyo_put_group - Drop reference on "struct tomoyo_group".
+ *
+ * @group: Pointer to "struct tomoyo_group". Maybe NULL.
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_put_group(struct tomoyo_group *group)
+{
+	if (group)
+		atomic_dec(&group->head.users);
+}
+
+/**
+ * tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.
+ *
+ * Returns pointer to "struct tomoyo_domain_info" for current thread.
+ */
+static inline struct tomoyo_domain_info *tomoyo_domain(void)
+{
+	return current_cred()->security;
+}
+
+/**
+ * tomoyo_real_domain - Get "struct tomoyo_domain_info" for specified thread.
+ *
+ * @task: Pointer to "struct task_struct".
+ *
+ * Returns pointer to "struct tomoyo_security" for specified thread.
+ */
+static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
+							    *task)
+{
+	return task_cred_xxx(task, security);
+}
+
+/**
+ * tomoyo_same_name_union - Check for duplicated "struct tomoyo_name_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_name_union".
+ * @b: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_name_union
+(const struct tomoyo_name_union *a, const struct tomoyo_name_union *b)
+{
+	return a->filename == b->filename && a->group == b->group;
+}
+
+/**
+ * tomoyo_same_number_union - Check for duplicated "struct tomoyo_number_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_number_union".
+ * @b: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_number_union
+(const struct tomoyo_number_union *a, const struct tomoyo_number_union *b)
+{
+	return a->values[0] == b->values[0] && a->values[1] == b->values[1] &&
+		a->group == b->group && a->value_type[0] == b->value_type[0] &&
+		a->value_type[1] == b->value_type[1];
+}
+
+/**
+ * tomoyo_same_ipaddr_union - Check for duplicated "struct tomoyo_ipaddr_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_ipaddr_union".
+ * @b: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_ipaddr_union
+(const struct tomoyo_ipaddr_union *a, const struct tomoyo_ipaddr_union *b)
+{
+	return !memcmp(a->ip, b->ip, sizeof(a->ip)) && a->group == b->group &&
+		a->is_ipv6 == b->is_ipv6;
+}
+
+/**
+ * tomoyo_current_namespace - Get "struct tomoyo_policy_namespace" for current thread.
+ *
+ * Returns pointer to "struct tomoyo_policy_namespace" for current thread.
+ */
+static inline struct tomoyo_policy_namespace *tomoyo_current_namespace(void)
+{
+	return tomoyo_domain()->ns;
+}
+
+#if defined(CONFIG_SLOB)
+
+/**
+ * tomoyo_round2 - Round up to power of 2 for calculating memory usage.
+ *
+ * @size: Size to be rounded up.
+ *
+ * Returns @size.
+ *
+ * Since SLOB does not round up, this function simply returns @size.
+ */
+static inline int tomoyo_round2(size_t size)
+{
+	return size;
+}
+
+#else
+
+/**
+ * tomoyo_round2 - Round up to power of 2 for calculating memory usage.
+ *
+ * @size: Size to be rounded up.
+ *
+ * Returns rounded size.
+ *
+ * Strictly speaking, SLAB may be able to allocate (e.g.) 96 bytes instead of
+ * (e.g.) 128 bytes.
+ */
+static inline int tomoyo_round2(size_t size)
+{
+#if PAGE_SIZE == 4096
+	size_t bsize = 32;
+#else
+	size_t bsize = 64;
+#endif
+	if (!size)
+		return 0;
+	while (size > bsize)
+		bsize <<= 1;
+	return bsize;
+}
+
+#endif
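+
+/*
+ * Worked example for the non-SLOB variant above: with PAGE_SIZE == 4096,
+ * tomoyo_round2(100) starts with bsize = 32 and doubles it (32 -> 64 ->
+ * 128) until it covers @size, returning 128; tomoyo_round2(0) returns 0.
+ */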
+
+/**
+ * list_for_each_cookie - iterate over a list with cookie.
+ * @pos:        the &struct list_head to use as a loop cursor.
+ * @head:       the head for your list.
+ */
+#define list_for_each_cookie(pos, head)					\
+	if (!pos)							\
+		pos = srcu_dereference((head)->next, &tomoyo_ss);	\
+	for ( ; pos != (head); pos = srcu_dereference(pos->next, &tomoyo_ss))
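+
+/*
+ * Usage sketch: because @pos survives across calls, a read that filled its
+ * output buffer can resume from the same list position later, e.g.:
+ *
+ *	list_for_each_cookie(head->r.domain, &tomoyo_domain_list) {
+ *		... emit one domain; return early if the output buffer is
+ *		    full, leaving head->r.domain at the current entry ...
+ *	}
+ */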
+
+#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
new file mode 100644
index 0000000..6c4528d
--- /dev/null
+++ b/security/tomoyo/condition.c
@@ -0,0 +1,1094 @@
+/*
+ * security/tomoyo/condition.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* List of "struct tomoyo_condition". */
+LIST_HEAD(tomoyo_condition_list);
+
+/**
+ * tomoyo_argv - Check argv[] in "struct linux_binprm".
+ *
+ * @index:   Index number of @arg_ptr.
+ * @arg_ptr: Contents of argv[@index].
+ * @argc:    Length of @argv.
+ * @argv:    Pointer to "struct tomoyo_argv".
+ * @checked: Set to true if @argv[@index] was found.
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_argv(const unsigned int index, const char *arg_ptr,
+			const int argc, const struct tomoyo_argv *argv,
+			u8 *checked)
+{
+	int i;
+	struct tomoyo_path_info arg;
+	arg.name = arg_ptr;
+	for (i = 0; i < argc; argv++, checked++, i++) {
+		bool result;
+		if (index != argv->index)
+			continue;
+		*checked = 1;
+		tomoyo_fill_path_info(&arg);
+		result = tomoyo_path_matches_pattern(&arg, argv->value);
+		if (argv->is_not)
+			result = !result;
+		if (!result)
+			return false;
+	}
+	return true;
+}
+
+/**
+ * tomoyo_envp - Check envp[] in "struct linux_binprm".
+ *
+ * @env_name:  The name of environment variable.
+ * @env_value: The value of environment variable.
+ * @envc:      Length of @envp.
+ * @envp:      Pointer to "struct tomoyo_envp".
+ * @checked:   Set to true if @envp[@env_name] was found.
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_envp(const char *env_name, const char *env_value,
+			const int envc, const struct tomoyo_envp *envp,
+			u8 *checked)
+{
+	int i;
+	struct tomoyo_path_info name;
+	struct tomoyo_path_info value;
+	name.name = env_name;
+	tomoyo_fill_path_info(&name);
+	value.name = env_value;
+	tomoyo_fill_path_info(&value);
+	for (i = 0; i < envc; envp++, checked++, i++) {
+		bool result;
+		if (!tomoyo_path_matches_pattern(&name, envp->name))
+			continue;
+		*checked = 1;
+		if (envp->value) {
+			result = tomoyo_path_matches_pattern(&value,
+							     envp->value);
+			if (envp->is_not)
+				result = !result;
+		} else {
+			result = true;
+			if (!envp->is_not)
+				result = !result;
+		}
+		if (!result)
+			return false;
+	}
+	return true;
+}
+
+/**
+ * tomoyo_scan_bprm - Scan "struct linux_binprm".
+ *
+ * @ee:   Pointer to "struct tomoyo_execve".
+ * @argc: Length of @argv.
+ * @argv: Pointer to "struct tomoyo_argv".
+ * @envc: Length of @envp.
+ * @envp: Pointer to "struct tomoyo_envp".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_scan_bprm(struct tomoyo_execve *ee,
+			     const u16 argc, const struct tomoyo_argv *argv,
+			     const u16 envc, const struct tomoyo_envp *envp)
+{
+	struct linux_binprm *bprm = ee->bprm;
+	struct tomoyo_page_dump *dump = &ee->dump;
+	char *arg_ptr = ee->tmp;
+	int arg_len = 0;
+	unsigned long pos = bprm->p;
+	int offset = pos % PAGE_SIZE;
+	int argv_count = bprm->argc;
+	int envp_count = bprm->envc;
+	bool result = true;
+	u8 local_checked[32];
+	u8 *checked;
+	if (argc + envc <= sizeof(local_checked)) {
+		checked = local_checked;
+		memset(local_checked, 0, sizeof(local_checked));
+	} else {
+		checked = kzalloc(argc + envc, GFP_NOFS);
+		if (!checked)
+			return false;
+	}
+	while (argv_count || envp_count) {
+		if (!tomoyo_dump_page(bprm, pos, dump)) {
+			result = false;
+			goto out;
+		}
+		pos += PAGE_SIZE - offset;
+		while (offset < PAGE_SIZE) {
+			/* Read. */
+			const char *kaddr = dump->data;
+			const unsigned char c = kaddr[offset++];
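+			/*
+			 * Encode the byte the way the policy language
+			 * expects: keep printable characters as-is, escape
+			 * '\' as "\\" and everything else as a 3-digit octal
+			 * sequence, e.g. ' ' becomes "\040".
+			 */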
+			if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+				if (c == '\\') {
+					arg_ptr[arg_len++] = '\\';
+					arg_ptr[arg_len++] = '\\';
+				} else if (c > ' ' && c < 127) {
+					arg_ptr[arg_len++] = c;
+				} else {
+					arg_ptr[arg_len++] = '\\';
+					arg_ptr[arg_len++] = (c >> 6) + '0';
+					arg_ptr[arg_len++] =
+						((c >> 3) & 7) + '0';
+					arg_ptr[arg_len++] = (c & 7) + '0';
+				}
+			} else {
+				arg_ptr[arg_len] = '\0';
+			}
+			if (c)
+				continue;
+			/* Check. */
+			if (argv_count) {
+				if (!tomoyo_argv(bprm->argc - argv_count,
+						 arg_ptr, argc, argv,
+						 checked)) {
+					result = false;
+					break;
+				}
+				argv_count--;
+			} else if (envp_count) {
+				char *cp = strchr(arg_ptr, '=');
+				if (cp) {
+					*cp = '\0';
+					if (!tomoyo_envp(arg_ptr, cp + 1,
+							 envc, envp,
+							 checked + argc)) {
+						result = false;
+						break;
+					}
+				}
+				envp_count--;
+			} else {
+				break;
+			}
+			arg_len = 0;
+		}
+		offset = 0;
+		if (!result)
+			break;
+	}
+out:
+	if (result) {
+		int i;
+		/* Check not-yet-checked entries. */
+		for (i = 0; i < argc; i++) {
+			if (checked[i])
+				continue;
+			/*
+			 * Return true only if all unchecked indexes in
+			 * bprm->argv[] are not matched.
+			 */
+			if (argv[i].is_not)
+				continue;
+			result = false;
+			break;
+		}
+		for (i = 0; i < envc; envp++, i++) {
+			if (checked[argc + i])
+				continue;
+			/*
+			 * Return true only if all unchecked environment variables
+			 * in bprm->envp[] are either undefined or not matched.
+			 */
+			if ((!envp->value && !envp->is_not) ||
+			    (envp->value && envp->is_not))
+				continue;
+			result = false;
+			break;
+		}
+	}
+	if (checked != local_checked)
+		kfree(checked);
+	return result;
+}
+
+/**
+ * tomoyo_scan_exec_realpath - Check "exec.realpath" parameter of "struct tomoyo_condition".
+ *
+ * @file:  Pointer to "struct file".
+ * @ptr:   Pointer to "struct tomoyo_name_union".
+ * @match: True if "exec.realpath=", false if "exec.realpath!=".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_scan_exec_realpath(struct file *file,
+				      const struct tomoyo_name_union *ptr,
+				      const bool match)
+{
+	bool result;
+	struct tomoyo_path_info exe;
+	if (!file)
+		return false;
+	exe.name = tomoyo_realpath_from_path(&file->f_path);
+	if (!exe.name)
+		return false;
+	tomoyo_fill_path_info(&exe);
+	result = tomoyo_compare_name_union(&exe, ptr);
+	kfree(exe.name);
+	return result == match;
+}
+
+/**
+ * tomoyo_get_dqword - tomoyo_get_name() for a quoted string.
+ *
+ * @start: String to save.
+ *
+ * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
+ */
+static const struct tomoyo_path_info *tomoyo_get_dqword(char *start)
+{
+	char *cp = start + strlen(start) - 1;
+	if (cp == start || *start++ != '"' || *cp != '"')
+		return NULL;
+	*cp = '\0';
+	if (*start && !tomoyo_correct_word(start))
+		return NULL;
+	return tomoyo_get_name(start);
+}
+
+/**
+ * tomoyo_parse_name_union_quoted - Parse a quoted word.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr:   Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_name_union_quoted(struct tomoyo_acl_param *param,
+					   struct tomoyo_name_union *ptr)
+{
+	char *filename = param->data;
+	if (*filename == '@')
+		return tomoyo_parse_name_union(param, ptr);
+	ptr->filename = tomoyo_get_dqword(filename);
+	return ptr->filename != NULL;
+}
+
+/**
+ * tomoyo_parse_argv - Parse an argv[] condition part.
+ *
+ * @left:  Lefthand value.
+ * @right: Righthand value.
+ * @argv:  Pointer to "struct tomoyo_argv".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_argv(char *left, char *right,
+			      struct tomoyo_argv *argv)
+{
+	if (tomoyo_parse_ulong(&argv->index, &left) !=
+	    TOMOYO_VALUE_TYPE_DECIMAL || *left++ != ']' || *left)
+		return false;
+	argv->value = tomoyo_get_dqword(right);
+	return argv->value != NULL;
+}
+
+/**
+ * tomoyo_parse_envp - Parse an envp[] condition part.
+ *
+ * @left:  Lefthand value.
+ * @right: Righthand value.
+ * @envp:  Pointer to "struct tomoyo_envp".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_envp(char *left, char *right,
+			      struct tomoyo_envp *envp)
+{
+	const struct tomoyo_path_info *name;
+	const struct tomoyo_path_info *value;
+	char *cp = left + strlen(left) - 1;
+	if (*cp-- != ']' || *cp != '"')
+		goto out;
+	*cp = '\0';
+	if (!tomoyo_correct_word(left))
+		goto out;
+	name = tomoyo_get_name(left);
+	if (!name)
+		goto out;
+	if (!strcmp(right, "NULL")) {
+		value = NULL;
+	} else {
+		value = tomoyo_get_dqword(right);
+		if (!value) {
+			tomoyo_put_name(name);
+			goto out;
+		}
+	}
+	envp->name = name;
+	envp->value = value;
+	return true;
+out:
+	return false;
+}
+
+/**
+ * tomoyo_same_condition - Check for duplicated "struct tomoyo_condition" entry.
+ *
+ * @a: Pointer to "struct tomoyo_condition".
+ * @b: Pointer to "struct tomoyo_condition".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_condition(const struct tomoyo_condition *a,
+					 const struct tomoyo_condition *b)
+{
+	return a->size == b->size && a->condc == b->condc &&
+		a->numbers_count == b->numbers_count &&
+		a->names_count == b->names_count &&
+		a->argc == b->argc && a->envc == b->envc &&
+		a->grant_log == b->grant_log && a->transit == b->transit &&
+		!memcmp(a + 1, b + 1, a->size - sizeof(*a));
+}
+
+/**
+ * tomoyo_condition_type - Get condition type.
+ *
+ * @word: Keyword string.
+ *
+ * Returns one of values in "enum tomoyo_conditions_index" on success,
+ * TOMOYO_MAX_CONDITION_KEYWORD otherwise.
+ */
+static u8 tomoyo_condition_type(const char *word)
+{
+	u8 i;
+	for (i = 0; i < TOMOYO_MAX_CONDITION_KEYWORD; i++) {
+		if (!strcmp(word, tomoyo_condition_keyword[i]))
+			break;
+	}
+	return i;
+}
+
+/* Define this to enable debug mode. */
+/* #define DEBUG_CONDITION */
+
+#ifdef DEBUG_CONDITION
+#define dprintk printk
+#else
+#define dprintk(...) do { } while (0)
+#endif
+
+/**
+ * tomoyo_commit_condition - Commit "struct tomoyo_condition".
+ *
+ * @entry: Pointer to "struct tomoyo_condition".
+ *
+ * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise.
+ *
+ * This function merges duplicated entries. This function returns NULL if
+ * @entry is not duplicated but memory quota for policy has exceeded.
+ */
+static struct tomoyo_condition *tomoyo_commit_condition
+(struct tomoyo_condition *entry)
+{
+	struct tomoyo_condition *ptr;
+	bool found = false;
+	if (mutex_lock_interruptible(&tomoyo_policy_lock)) {
+		dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__);
+		ptr = NULL;
+		found = true;
+		goto out;
+	}
+	list_for_each_entry(ptr, &tomoyo_condition_list, head.list) {
+		if (!tomoyo_same_condition(ptr, entry) ||
+		    atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
+			continue;
+		/* Same entry found. Share this entry. */
+		atomic_inc(&ptr->head.users);
+		found = true;
+		break;
+	}
+	if (!found) {
+		if (tomoyo_memory_ok(entry)) {
+			atomic_set(&entry->head.users, 1);
+			list_add(&entry->head.list, &tomoyo_condition_list);
+		} else {
+			found = true;
+			ptr = NULL;
+		}
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+out:
+	if (found) {
+		tomoyo_del_condition(&entry->head.list);
+		kfree(entry);
+		entry = ptr;
+	}
+	return entry;
+}
+
+/**
+ * tomoyo_get_transit_preference - Parse domain transition preference for execve().
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @e:     Pointer to "struct tomoyo_condition".
+ *
+ * Returns the condition string part.
+ */
+static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param,
+					   struct tomoyo_condition *e)
+{
+	char * const pos = param->data;
+	bool flag;
+	if (*pos == '<') {
+		e->transit = tomoyo_get_domainname(param);
+		goto done;
+	}
+	{
+		char *cp = strchr(pos, ' ');
+		if (cp)
+			*cp = '\0';
+		flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") ||
+			!strcmp(pos, "initialize") || !strcmp(pos, "reset") ||
+			!strcmp(pos, "child") || !strcmp(pos, "parent");
+		if (cp)
+			*cp = ' ';
+	}
+	if (!flag)
+		return pos;
+	e->transit = tomoyo_get_name(tomoyo_read_token(param));
+done:
+	if (e->transit)
+		return param->data;
+	/*
+	 * Return a bad read-only condition string that will let
+	 * tomoyo_get_condition() return NULL.
+	 */
+	return "/";
+}
+
+/**
+ * tomoyo_get_condition - Parse condition part.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise.
+ */
+struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param)
+{
+	struct tomoyo_condition *entry = NULL;
+	struct tomoyo_condition_element *condp = NULL;
+	struct tomoyo_number_union *numbers_p = NULL;
+	struct tomoyo_name_union *names_p = NULL;
+	struct tomoyo_argv *argv = NULL;
+	struct tomoyo_envp *envp = NULL;
+	struct tomoyo_condition e = { };
+	char * const start_of_string =
+		tomoyo_get_transit_preference(param, &e);
+	char * const end_of_string = start_of_string + strlen(start_of_string);
+	char *pos;
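+	/*
+	 * Two-pass parse: the first pass (condp == NULL) only counts how
+	 * many entries of each kind are needed so that a blob of the right
+	 * size can be allocated; the second pass (after "rerun") fills the
+	 * trailing arrays of that blob.
+	 */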
+rerun:
+	pos = start_of_string;
+	while (1) {
+		u8 left = -1;
+		u8 right = -1;
+		char *left_word = pos;
+		char *cp;
+		char *right_word;
+		bool is_not;
+		if (!*left_word)
+			break;
+		/*
+		 * Since left-hand condition does not allow use of "path_group"
+		 * or "number_group" and environment variable's names do not
+		 * accept '=', it is guaranteed that the original line consists
+		 * of one or more repetitions of $left$operator$right blocks
+		 * where "$left is free from '=' and ' '" and "$operator is
+		 * either '=' or '!='" and "$right is free from ' '".
+		 * Therefore, we can reconstruct the original line at the end
+		 * of dry run even if we overwrite $operator with '\0'.
+		 */
+		cp = strchr(pos, ' ');
+		if (cp) {
+			*cp = '\0'; /* Will restore later. */
+			pos = cp + 1;
+		} else {
+			pos = "";
+		}
+		right_word = strchr(left_word, '=');
+		if (!right_word || right_word == left_word)
+			goto out;
+		is_not = *(right_word - 1) == '!';
+		if (is_not)
+			*(right_word++ - 1) = '\0'; /* Will restore later. */
+		else if (*(right_word + 1) != '=')
+			*right_word++ = '\0'; /* Will restore later. */
+		else
+			goto out;
+		dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word,
+			is_not ? "!" : "", right_word);
+		if (!strcmp(left_word, "grant_log")) {
+			if (entry) {
+				if (is_not ||
+				    entry->grant_log != TOMOYO_GRANTLOG_AUTO)
+					goto out;
+				else if (!strcmp(right_word, "yes"))
+					entry->grant_log = TOMOYO_GRANTLOG_YES;
+				else if (!strcmp(right_word, "no"))
+					entry->grant_log = TOMOYO_GRANTLOG_NO;
+				else
+					goto out;
+			}
+			continue;
+		}
+		if (!strncmp(left_word, "exec.argv[", 10)) {
+			if (!argv) {
+				e.argc++;
+				e.condc++;
+			} else {
+				e.argc--;
+				e.condc--;
+				left = TOMOYO_ARGV_ENTRY;
+				argv->is_not = is_not;
+				if (!tomoyo_parse_argv(left_word + 10,
+						       right_word, argv++))
+					goto out;
+			}
+			goto store_value;
+		}
+		if (!strncmp(left_word, "exec.envp[\"", 11)) {
+			if (!envp) {
+				e.envc++;
+				e.condc++;
+			} else {
+				e.envc--;
+				e.condc--;
+				left = TOMOYO_ENVP_ENTRY;
+				envp->is_not = is_not;
+				if (!tomoyo_parse_envp(left_word + 11,
+						       right_word, envp++))
+					goto out;
+			}
+			goto store_value;
+		}
+		left = tomoyo_condition_type(left_word);
+		dprintk(KERN_WARNING "%u: <%s> left=%u\n", __LINE__, left_word,
+			left);
+		if (left == TOMOYO_MAX_CONDITION_KEYWORD) {
+			if (!numbers_p) {
+				e.numbers_count++;
+			} else {
+				e.numbers_count--;
+				left = TOMOYO_NUMBER_UNION;
+				param->data = left_word;
+				if (*left_word == '@' ||
+				    !tomoyo_parse_number_union(param,
+							       numbers_p++))
+					goto out;
+			}
+		}
+		if (!condp)
+			e.condc++;
+		else
+			e.condc--;
+		if (left == TOMOYO_EXEC_REALPATH ||
+		    left == TOMOYO_SYMLINK_TARGET) {
+			if (!names_p) {
+				e.names_count++;
+			} else {
+				e.names_count--;
+				right = TOMOYO_NAME_UNION;
+				param->data = right_word;
+				if (!tomoyo_parse_name_union_quoted(param,
+								    names_p++))
+					goto out;
+			}
+			goto store_value;
+		}
+		right = tomoyo_condition_type(right_word);
+		if (right == TOMOYO_MAX_CONDITION_KEYWORD) {
+			if (!numbers_p) {
+				e.numbers_count++;
+			} else {
+				e.numbers_count--;
+				right = TOMOYO_NUMBER_UNION;
+				param->data = right_word;
+				if (!tomoyo_parse_number_union(param,
+							       numbers_p++))
+					goto out;
+			}
+		}
+store_value:
+		if (!condp) {
+			dprintk(KERN_WARNING "%u: dry_run left=%u right=%u "
+				"match=%u\n", __LINE__, left, right, !is_not);
+			continue;
+		}
+		condp->left = left;
+		condp->right = right;
+		condp->equals = !is_not;
+		dprintk(KERN_WARNING "%u: left=%u right=%u match=%u\n",
+			__LINE__, condp->left, condp->right,
+			condp->equals);
+		condp++;
+	}
+	dprintk(KERN_INFO "%u: cond=%u numbers=%u names=%u ac=%u ec=%u\n",
+		__LINE__, e.condc, e.numbers_count, e.names_count, e.argc,
+		e.envc);
+	if (entry) {
+		BUG_ON(e.names_count | e.numbers_count | e.argc | e.envc |
+		       e.condc);
+		return tomoyo_commit_condition(entry);
+	}
+	e.size = sizeof(*entry)
+		+ e.condc * sizeof(struct tomoyo_condition_element)
+		+ e.numbers_count * sizeof(struct tomoyo_number_union)
+		+ e.names_count * sizeof(struct tomoyo_name_union)
+		+ e.argc * sizeof(struct tomoyo_argv)
+		+ e.envc * sizeof(struct tomoyo_envp);
+	entry = kzalloc(e.size, GFP_NOFS);
+	if (!entry)
+		goto out2;
+	*entry = e;
+	e.transit = NULL;
+	condp = (struct tomoyo_condition_element *) (entry + 1);
+	numbers_p = (struct tomoyo_number_union *) (condp + e.condc);
+	names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count);
+	argv = (struct tomoyo_argv *) (names_p + e.names_count);
+	envp = (struct tomoyo_envp *) (argv + e.argc);
+	{
+		bool flag = false;
+		for (pos = start_of_string; pos < end_of_string; pos++) {
+			if (*pos)
+				continue;
+			if (flag) /* Restore " ". */
+				*pos = ' ';
+			else if (*(pos + 1) == '=') /* Restore "!=". */
+				*pos = '!';
+			else /* Restore "=". */
+				*pos = '=';
+			flag = !flag;
+		}
+	}
+	goto rerun;
+out:
+	dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__);
+	if (entry) {
+		tomoyo_del_condition(&entry->head.list);
+		kfree(entry);
+	}
+out2:
+	tomoyo_put_name(e.transit);
+	return NULL;
+}
+
+/**
+ * tomoyo_get_attributes - Revalidate "struct inode".
+ *
+ * @obj: Pointer to "struct tomoyo_obj_info".
+ *
+ * Returns nothing.
+ */
+void tomoyo_get_attributes(struct tomoyo_obj_info *obj)
+{
+	u8 i;
+	struct dentry *dentry = NULL;
+
+	for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
+		struct inode *inode;
+		switch (i) {
+		case TOMOYO_PATH1:
+			dentry = obj->path1.dentry;
+			if (!dentry)
+				continue;
+			break;
+		case TOMOYO_PATH2:
+			dentry = obj->path2.dentry;
+			if (!dentry)
+				continue;
+			break;
+		default:
+			if (!dentry)
+				continue;
+			dentry = dget_parent(dentry);
+			break;
+		}
+		inode = d_backing_inode(dentry);
+		if (inode) {
+			struct tomoyo_mini_stat *stat = &obj->stat[i];
+			stat->uid  = inode->i_uid;
+			stat->gid  = inode->i_gid;
+			stat->ino  = inode->i_ino;
+			stat->mode = inode->i_mode;
+			stat->dev  = inode->i_sb->s_dev;
+			stat->rdev = inode->i_rdev;
+			obj->stat_valid[i] = true;
+		}
+		if (i & 1) /* i == TOMOYO_PATH1_PARENT ||
+			      i == TOMOYO_PATH2_PARENT */
+			dput(dentry);
+	}
+}
+
+/**
+ * tomoyo_condition - Check condition part.
+ *
+ * @r:    Pointer to "struct tomoyo_request_info".
+ * @cond: Pointer to "struct tomoyo_condition". Maybe NULL.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_condition(struct tomoyo_request_info *r,
+		      const struct tomoyo_condition *cond)
+{
+	u32 i;
+	unsigned long min_v[2] = { 0, 0 };
+	unsigned long max_v[2] = { 0, 0 };
+	const struct tomoyo_condition_element *condp;
+	const struct tomoyo_number_union *numbers_p;
+	const struct tomoyo_name_union *names_p;
+	const struct tomoyo_argv *argv;
+	const struct tomoyo_envp *envp;
+	struct tomoyo_obj_info *obj;
+	u16 condc;
+	u16 argc;
+	u16 envc;
+	struct linux_binprm *bprm = NULL;
+	if (!cond)
+		return true;
+	condc = cond->condc;
+	argc = cond->argc;
+	envc = cond->envc;
+	obj = r->obj;
+	if (r->ee)
+		bprm = r->ee->bprm;
+	if (!bprm && (argc || envc))
+		return false;
+	condp = (struct tomoyo_condition_element *) (cond + 1);
+	numbers_p = (const struct tomoyo_number_union *) (condp + condc);
+	names_p = (const struct tomoyo_name_union *)
+		(numbers_p + cond->numbers_count);
+	argv = (const struct tomoyo_argv *) (names_p + cond->names_count);
+	envp = (const struct tomoyo_envp *) (argv + argc);
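+	/*
+	 * Example (illustrative): a policy line such as
+	 *   file read /etc/shadow task.uid=0 task.euid=0
+	 * yields two condition elements; the loop below evaluates each
+	 * "left<op>right" pair and fails the condition check on the
+	 * first mismatch.
+	 */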
+	for (i = 0; i < condc; i++) {
+		const bool match = condp->equals;
+		const u8 left = condp->left;
+		const u8 right = condp->right;
+		bool is_bitop[2] = { false, false };
+		u8 j;
+		condp++;
+		/* Check argv[] and envp[] later. */
+		if (left == TOMOYO_ARGV_ENTRY || left == TOMOYO_ENVP_ENTRY)
+			continue;
+		/* Check string expressions. */
+		if (right == TOMOYO_NAME_UNION) {
+			const struct tomoyo_name_union *ptr = names_p++;
+			switch (left) {
+				struct tomoyo_path_info *symlink;
+				struct tomoyo_execve *ee;
+				struct file *file;
+			case TOMOYO_SYMLINK_TARGET:
+				symlink = obj ? obj->symlink_target : NULL;
+				if (!symlink ||
+				    !tomoyo_compare_name_union(symlink, ptr)
+				    == match)
+					goto out;
+				break;
+			case TOMOYO_EXEC_REALPATH:
+				ee = r->ee;
+				file = ee ? ee->bprm->file : NULL;
+				if (!tomoyo_scan_exec_realpath(file, ptr,
+							       match))
+					goto out;
+				break;
+			}
+			continue;
+		}
+		/* Check numeric or bit-op expressions. */
+		for (j = 0; j < 2; j++) {
+			const u8 index = j ? right : left;
+			unsigned long value = 0;
+			switch (index) {
+			case TOMOYO_TASK_UID:
+				value = from_kuid(&init_user_ns, current_uid());
+				break;
+			case TOMOYO_TASK_EUID:
+				value = from_kuid(&init_user_ns, current_euid());
+				break;
+			case TOMOYO_TASK_SUID:
+				value = from_kuid(&init_user_ns, current_suid());
+				break;
+			case TOMOYO_TASK_FSUID:
+				value = from_kuid(&init_user_ns, current_fsuid());
+				break;
+			case TOMOYO_TASK_GID:
+				value = from_kgid(&init_user_ns, current_gid());
+				break;
+			case TOMOYO_TASK_EGID:
+				value = from_kgid(&init_user_ns, current_egid());
+				break;
+			case TOMOYO_TASK_SGID:
+				value = from_kgid(&init_user_ns, current_sgid());
+				break;
+			case TOMOYO_TASK_FSGID:
+				value = from_kgid(&init_user_ns, current_fsgid());
+				break;
+			case TOMOYO_TASK_PID:
+				value = tomoyo_sys_getpid();
+				break;
+			case TOMOYO_TASK_PPID:
+				value = tomoyo_sys_getppid();
+				break;
+			case TOMOYO_TYPE_IS_SOCKET:
+				value = S_IFSOCK;
+				break;
+			case TOMOYO_TYPE_IS_SYMLINK:
+				value = S_IFLNK;
+				break;
+			case TOMOYO_TYPE_IS_FILE:
+				value = S_IFREG;
+				break;
+			case TOMOYO_TYPE_IS_BLOCK_DEV:
+				value = S_IFBLK;
+				break;
+			case TOMOYO_TYPE_IS_DIRECTORY:
+				value = S_IFDIR;
+				break;
+			case TOMOYO_TYPE_IS_CHAR_DEV:
+				value = S_IFCHR;
+				break;
+			case TOMOYO_TYPE_IS_FIFO:
+				value = S_IFIFO;
+				break;
+			case TOMOYO_MODE_SETUID:
+				value = S_ISUID;
+				break;
+			case TOMOYO_MODE_SETGID:
+				value = S_ISGID;
+				break;
+			case TOMOYO_MODE_STICKY:
+				value = S_ISVTX;
+				break;
+			case TOMOYO_MODE_OWNER_READ:
+				value = S_IRUSR;
+				break;
+			case TOMOYO_MODE_OWNER_WRITE:
+				value = S_IWUSR;
+				break;
+			case TOMOYO_MODE_OWNER_EXECUTE:
+				value = S_IXUSR;
+				break;
+			case TOMOYO_MODE_GROUP_READ:
+				value = S_IRGRP;
+				break;
+			case TOMOYO_MODE_GROUP_WRITE:
+				value = S_IWGRP;
+				break;
+			case TOMOYO_MODE_GROUP_EXECUTE:
+				value = S_IXGRP;
+				break;
+			case TOMOYO_MODE_OTHERS_READ:
+				value = S_IROTH;
+				break;
+			case TOMOYO_MODE_OTHERS_WRITE:
+				value = S_IWOTH;
+				break;
+			case TOMOYO_MODE_OTHERS_EXECUTE:
+				value = S_IXOTH;
+				break;
+			case TOMOYO_EXEC_ARGC:
+				if (!bprm)
+					goto out;
+				value = bprm->argc;
+				break;
+			case TOMOYO_EXEC_ENVC:
+				if (!bprm)
+					goto out;
+				value = bprm->envc;
+				break;
+			case TOMOYO_NUMBER_UNION:
+				/* Fetch values later. */
+				break;
+			default:
+				if (!obj)
+					goto out;
+				if (!obj->validate_done) {
+					tomoyo_get_attributes(obj);
+					obj->validate_done = true;
+				}
+				{
+					u8 stat_index;
+					struct tomoyo_mini_stat *stat;
+					switch (index) {
+					case TOMOYO_PATH1_UID:
+					case TOMOYO_PATH1_GID:
+					case TOMOYO_PATH1_INO:
+					case TOMOYO_PATH1_MAJOR:
+					case TOMOYO_PATH1_MINOR:
+					case TOMOYO_PATH1_TYPE:
+					case TOMOYO_PATH1_DEV_MAJOR:
+					case TOMOYO_PATH1_DEV_MINOR:
+					case TOMOYO_PATH1_PERM:
+						stat_index = TOMOYO_PATH1;
+						break;
+					case TOMOYO_PATH2_UID:
+					case TOMOYO_PATH2_GID:
+					case TOMOYO_PATH2_INO:
+					case TOMOYO_PATH2_MAJOR:
+					case TOMOYO_PATH2_MINOR:
+					case TOMOYO_PATH2_TYPE:
+					case TOMOYO_PATH2_DEV_MAJOR:
+					case TOMOYO_PATH2_DEV_MINOR:
+					case TOMOYO_PATH2_PERM:
+						stat_index = TOMOYO_PATH2;
+						break;
+					case TOMOYO_PATH1_PARENT_UID:
+					case TOMOYO_PATH1_PARENT_GID:
+					case TOMOYO_PATH1_PARENT_INO:
+					case TOMOYO_PATH1_PARENT_PERM:
+						stat_index =
+							TOMOYO_PATH1_PARENT;
+						break;
+					case TOMOYO_PATH2_PARENT_UID:
+					case TOMOYO_PATH2_PARENT_GID:
+					case TOMOYO_PATH2_PARENT_INO:
+					case TOMOYO_PATH2_PARENT_PERM:
+						stat_index =
+							TOMOYO_PATH2_PARENT;
+						break;
+					default:
+						goto out;
+					}
+					if (!obj->stat_valid[stat_index])
+						goto out;
+					stat = &obj->stat[stat_index];
+					switch (index) {
+					case TOMOYO_PATH1_UID:
+					case TOMOYO_PATH2_UID:
+					case TOMOYO_PATH1_PARENT_UID:
+					case TOMOYO_PATH2_PARENT_UID:
+						value = from_kuid(&init_user_ns, stat->uid);
+						break;
+					case TOMOYO_PATH1_GID:
+					case TOMOYO_PATH2_GID:
+					case TOMOYO_PATH1_PARENT_GID:
+					case TOMOYO_PATH2_PARENT_GID:
+						value = from_kgid(&init_user_ns, stat->gid);
+						break;
+					case TOMOYO_PATH1_INO:
+					case TOMOYO_PATH2_INO:
+					case TOMOYO_PATH1_PARENT_INO:
+					case TOMOYO_PATH2_PARENT_INO:
+						value = stat->ino;
+						break;
+					case TOMOYO_PATH1_MAJOR:
+					case TOMOYO_PATH2_MAJOR:
+						value = MAJOR(stat->dev);
+						break;
+					case TOMOYO_PATH1_MINOR:
+					case TOMOYO_PATH2_MINOR:
+						value = MINOR(stat->dev);
+						break;
+					case TOMOYO_PATH1_TYPE:
+					case TOMOYO_PATH2_TYPE:
+						value = stat->mode & S_IFMT;
+						break;
+					case TOMOYO_PATH1_DEV_MAJOR:
+					case TOMOYO_PATH2_DEV_MAJOR:
+						value = MAJOR(stat->rdev);
+						break;
+					case TOMOYO_PATH1_DEV_MINOR:
+					case TOMOYO_PATH2_DEV_MINOR:
+						value = MINOR(stat->rdev);
+						break;
+					case TOMOYO_PATH1_PERM:
+					case TOMOYO_PATH2_PERM:
+					case TOMOYO_PATH1_PARENT_PERM:
+					case TOMOYO_PATH2_PARENT_PERM:
+						value = stat->mode & S_IALLUGO;
+						break;
+					}
+				}
+				break;
+			}
+			max_v[j] = value;
+			min_v[j] = value;
+			switch (index) {
+			case TOMOYO_MODE_SETUID:
+			case TOMOYO_MODE_SETGID:
+			case TOMOYO_MODE_STICKY:
+			case TOMOYO_MODE_OWNER_READ:
+			case TOMOYO_MODE_OWNER_WRITE:
+			case TOMOYO_MODE_OWNER_EXECUTE:
+			case TOMOYO_MODE_GROUP_READ:
+			case TOMOYO_MODE_GROUP_WRITE:
+			case TOMOYO_MODE_GROUP_EXECUTE:
+			case TOMOYO_MODE_OTHERS_READ:
+			case TOMOYO_MODE_OTHERS_WRITE:
+			case TOMOYO_MODE_OTHERS_EXECUTE:
+				is_bitop[j] = true;
+			}
+		}
+		if (left == TOMOYO_NUMBER_UNION) {
+			/* Fetch values now. */
+			const struct tomoyo_number_union *ptr = numbers_p++;
+			min_v[0] = ptr->values[0];
+			max_v[0] = ptr->values[1];
+		}
+		if (right == TOMOYO_NUMBER_UNION) {
+			/* Fetch values now. */
+			const struct tomoyo_number_union *ptr = numbers_p++;
+			if (ptr->group) {
+				if (tomoyo_number_matches_group(min_v[0],
+								max_v[0],
+								ptr->group)
+				    == match)
+					continue;
+			} else {
+				if ((min_v[0] <= ptr->values[1] &&
+				     max_v[0] >= ptr->values[0]) == match)
+					continue;
+			}
+			goto out;
+		}
+		/*
+		 * Bit operation is valid only when counterpart value
+		 * represents permission.
+		 */
+		if (is_bitop[0] && is_bitop[1]) {
+			goto out;
+		} else if (is_bitop[0]) {
+			switch (right) {
+			case TOMOYO_PATH1_PERM:
+			case TOMOYO_PATH1_PARENT_PERM:
+			case TOMOYO_PATH2_PERM:
+			case TOMOYO_PATH2_PARENT_PERM:
+				if (!(max_v[0] & max_v[1]) == !match)
+					continue;
+			}
+			goto out;
+		} else if (is_bitop[1]) {
+			switch (left) {
+			case TOMOYO_PATH1_PERM:
+			case TOMOYO_PATH1_PARENT_PERM:
+			case TOMOYO_PATH2_PERM:
+			case TOMOYO_PATH2_PARENT_PERM:
+				if (!(max_v[0] & max_v[1]) == !match)
+					continue;
+			}
+			goto out;
+		}
+		/* Normal value range comparison. */
+		if ((min_v[0] <= max_v[1] && max_v[0] >= min_v[1]) == match)
+			continue;
+out:
+		return false;
+	}
+	/* Check argv[] and envp[] now. */
+	if (r->ee && (argc || envc))
+		return tomoyo_scan_bprm(r->ee, argc, argv, envc, envp);
+	return true;
+}
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
new file mode 100644
index 0000000..3865145
--- /dev/null
+++ b/security/tomoyo/domain.c
@@ -0,0 +1,901 @@
+/*
+ * security/tomoyo/domain.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/binfmts.h>
+#include <linux/slab.h>
+
+/* Variable definitions. */
+
+/* The initial domain. */
+struct tomoyo_domain_info tomoyo_kernel_domain;
+
+/**
+ * tomoyo_update_policy - Update an entry for exception policy.
+ *
+ * @new_entry:       Pointer to "struct tomoyo_acl_head".
+ * @size:            Size of @new_entry in bytes.
+ * @param:           Pointer to "struct tomoyo_acl_param".
+ * @check_duplicate: Callback function to find duplicated entry.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
+			 struct tomoyo_acl_param *param,
+			 bool (*check_duplicate) (const struct tomoyo_acl_head
+						  *,
+						  const struct tomoyo_acl_head
+						  *))
+{
+	int error = param->is_delete ? -ENOENT : -ENOMEM;
+	struct tomoyo_acl_head *entry;
+	struct list_head *list = param->list;
+
+	if (mutex_lock_interruptible(&tomoyo_policy_lock))
+		return -ENOMEM;
+	list_for_each_entry_rcu(entry, list, list) {
+		if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+			continue;
+		if (!check_duplicate(entry, new_entry))
+			continue;
+		entry->is_deleted = param->is_delete;
+		error = 0;
+		break;
+	}
+	if (error && !param->is_delete) {
+		entry = tomoyo_commit_ok(new_entry, size);
+		if (entry) {
+			list_add_tail_rcu(&entry->list, list);
+			error = 0;
+		}
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+	return error;
+}
+
+/**
+ * tomoyo_same_acl_head - Check for duplicated "struct tomoyo_acl_info" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *a,
+					const struct tomoyo_acl_info *b)
+{
+	return a->type == b->type && a->cond == b->cond;
+}
+
+/**
+ * tomoyo_update_domain - Update an entry for domain policy.
+ *
+ * @new_entry:       Pointer to "struct tomoyo_acl_info".
+ * @size:            Size of @new_entry in bytes.
+ * @param:           Pointer to "struct tomoyo_acl_param".
+ * @check_duplicate: Callback function to find duplicated entry.
+ * @merge_duplicate: Callback function to merge duplicated entry.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
+			 struct tomoyo_acl_param *param,
+			 bool (*check_duplicate) (const struct tomoyo_acl_info
+						  *,
+						  const struct tomoyo_acl_info
+						  *),
+			 bool (*merge_duplicate) (struct tomoyo_acl_info *,
+						  struct tomoyo_acl_info *,
+						  const bool))
+{
+	const bool is_delete = param->is_delete;
+	int error = is_delete ? -ENOENT : -ENOMEM;
+	struct tomoyo_acl_info *entry;
+	struct list_head * const list = param->list;
+
+	if (param->data[0]) {
+		new_entry->cond = tomoyo_get_condition(param);
+		if (!new_entry->cond)
+			return -EINVAL;
+		/*
+		 * Domain transition preference is allowed for only
+		 * "file execute" entries.
+		 */
+		if (new_entry->cond->transit &&
+		    !(new_entry->type == TOMOYO_TYPE_PATH_ACL &&
+		      container_of(new_entry, struct tomoyo_path_acl, head)
+		      ->perm == 1 << TOMOYO_TYPE_EXECUTE))
+			goto out;
+	}
+	if (mutex_lock_interruptible(&tomoyo_policy_lock))
+		goto out;
+	list_for_each_entry_rcu(entry, list, list) {
+		if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+			continue;
+		if (!tomoyo_same_acl_head(entry, new_entry) ||
+		    !check_duplicate(entry, new_entry))
+			continue;
+		if (merge_duplicate)
+			entry->is_deleted = merge_duplicate(entry, new_entry,
+							    is_delete);
+		else
+			entry->is_deleted = is_delete;
+		error = 0;
+		break;
+	}
+	if (error && !is_delete) {
+		entry = tomoyo_commit_ok(new_entry, size);
+		if (entry) {
+			list_add_tail_rcu(&entry->list, list);
+			error = 0;
+		}
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+out:
+	tomoyo_put_condition(new_entry->cond);
+	return error;
+}
+
+/**
+ * tomoyo_check_acl - Do permission check.
+ *
+ * @r:           Pointer to "struct tomoyo_request_info".
+ * @check_entry: Callback function to check type specific parameters.
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+void tomoyo_check_acl(struct tomoyo_request_info *r,
+		      bool (*check_entry) (struct tomoyo_request_info *,
+					   const struct tomoyo_acl_info *))
+{
+	const struct tomoyo_domain_info *domain = r->domain;
+	struct tomoyo_acl_info *ptr;
+	bool retried = false;
+	const struct list_head *list = &domain->acl_info_list;
+
+retry:
+	list_for_each_entry_rcu(ptr, list, list) {
+		if (ptr->is_deleted || ptr->type != r->param_type)
+			continue;
+		if (!check_entry(r, ptr))
+			continue;
+		if (!tomoyo_condition(r, ptr->cond))
+			continue;
+		r->matched_acl = ptr;
+		r->granted = true;
+		return;
+	}
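+	/*
+	 * No match in the domain's own list; retry against the
+	 * namespace-wide acl_group this domain subscribes to.
+	 */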
+	if (!retried) {
+		retried = true;
+		list = &domain->ns->acl_group[domain->group];
+		goto retry;
+	}
+	r->granted = false;
+}
+
+/* The list for "struct tomoyo_domain_info". */
+LIST_HEAD(tomoyo_domain_list);
+
+/**
+ * tomoyo_last_word - Get last component of a domainname.
+ *
+ * @name: Domainname to check.
+ *
+ * Returns the last word of @name.
+ */
+static const char *tomoyo_last_word(const char *name)
+{
+	const char *cp = strrchr(name, ' ');
+	if (cp)
+		return cp + 1;
+	return name;
+}
+
+/**
+ * tomoyo_same_transition_control - Check for duplicated "struct tomoyo_transition_control" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_transition_control(const struct tomoyo_acl_head *a,
+					   const struct tomoyo_acl_head *b)
+{
+	const struct tomoyo_transition_control *p1 = container_of(a,
+								  typeof(*p1),
+								  head);
+	const struct tomoyo_transition_control *p2 = container_of(b,
+								  typeof(*p2),
+								  head);
+	return p1->type == p2->type && p1->is_last_name == p2->is_last_name
+		&& p1->domainname == p2->domainname
+		&& p1->program == p2->program;
+}
+
+/**
+ * tomoyo_write_transition_control - Write "struct tomoyo_transition_control" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @type:  Type of this entry.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
+				    const u8 type)
+{
+	struct tomoyo_transition_control e = { .type = type };
+	int error = param->is_delete ? -ENOENT : -ENOMEM;
+	char *program = param->data;
+	char *domainname = strstr(program, " from ");
+	if (domainname) {
+		*domainname = '\0';
+		domainname += 6;
+	} else if (type == TOMOYO_TRANSITION_CONTROL_NO_KEEP ||
+		   type == TOMOYO_TRANSITION_CONTROL_KEEP) {
+		domainname = program;
+		program = NULL;
+	}
+	if (program && strcmp(program, "any")) {
+		if (!tomoyo_correct_path(program))
+			return -EINVAL;
+		e.program = tomoyo_get_name(program);
+		if (!e.program)
+			goto out;
+	}
+	if (domainname && strcmp(domainname, "any")) {
+		if (!tomoyo_correct_domain(domainname)) {
+			if (!tomoyo_correct_path(domainname))
+				goto out;
+			e.is_last_name = true;
+		}
+		e.domainname = tomoyo_get_name(domainname);
+		if (!e.domainname)
+			goto out;
+	}
+	param->list = &param->ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL];
+	error = tomoyo_update_policy(&e.head, sizeof(e), param,
+				     tomoyo_same_transition_control);
+out:
+	tomoyo_put_name(e.domainname);
+	tomoyo_put_name(e.program);
+	return error;
+}
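+
+/*
+ * Example (illustrative exception policy lines):
+ *   initialize_domain /usr/sbin/sshd from any
+ *   keep_domain <kernel> /usr/sbin/sshd
+ * The former specifies both a program and a domainname ("any"); the
+ * latter contains no " from " and is therefore treated as a domainname.
+ */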
+
+/**
+ * tomoyo_scan_transition - Try to find specific domain transition type.
+ *
+ * @list:       Pointer to "struct list_head".
+ * @domainname: The name of current domain.
+ * @program:    The name of requested program.
+ * @last_name:  The last component of @domainname.
+ * @type:       One of values in "enum tomoyo_transition_type".
+ *
+ * Returns true if a matching entry is found, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static inline bool tomoyo_scan_transition
+(const struct list_head *list, const struct tomoyo_path_info *domainname,
+ const struct tomoyo_path_info *program, const char *last_name,
+ const enum tomoyo_transition_type type)
+{
+	const struct tomoyo_transition_control *ptr;
+	list_for_each_entry_rcu(ptr, list, head.list) {
+		if (ptr->head.is_deleted || ptr->type != type)
+			continue;
+		if (ptr->domainname) {
+			if (!ptr->is_last_name) {
+				if (ptr->domainname != domainname)
+					continue;
+			} else {
+				/*
+				 * Use direct strcmp() since this path
+				 * is rarely taken.
+				 */
+				if (strcmp(ptr->domainname->name, last_name))
+					continue;
+			}
+		}
+		if (ptr->program && tomoyo_pathcmp(ptr->program, program))
+			continue;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * tomoyo_transition_type - Get domain transition type.
+ *
+ * @ns:         Pointer to "struct tomoyo_policy_namespace".
+ * @domainname: The name of current domain.
+ * @program:    The name of requested program.
+ *
+ * Returns TOMOYO_TRANSITION_CONTROL_RESET if executing @program causes
+ * domain transition across namespaces, TOMOYO_TRANSITION_CONTROL_INITIALIZE
+ * if executing @program reinitializes domain transition within that
+ * namespace, TOMOYO_TRANSITION_CONTROL_KEEP if executing @program stays at
+ * @domainname, others otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static enum tomoyo_transition_type tomoyo_transition_type
+(const struct tomoyo_policy_namespace *ns,
+ const struct tomoyo_path_info *domainname,
+ const struct tomoyo_path_info *program)
+{
+	const char *last_name = tomoyo_last_word(domainname->name);
+	enum tomoyo_transition_type type = TOMOYO_TRANSITION_CONTROL_NO_RESET;
+	while (type < TOMOYO_MAX_TRANSITION_TYPE) {
+		const struct list_head * const list =
+			&ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL];
+		if (!tomoyo_scan_transition(list, domainname, program,
+					    last_name, type)) {
+			type++;
+			continue;
+		}
+		if (type != TOMOYO_TRANSITION_CONTROL_NO_RESET &&
+		    type != TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE)
+			break;
+		/*
+		 * Do not check for reset_domain if no_reset_domain matched.
+		 * Do not check for initialize_domain if no_initialize_domain
+		 * matched. This relies on each "no_" entry immediately
+		 * preceding its counterpart in "enum tomoyo_transition_type",
+		 * which is why type is incremented twice below.
+		 */
+		type++;
+		type++;
+	}
+	return type;
+}
+
+/**
+ * tomoyo_same_aggregator - Check for duplicated "struct tomoyo_aggregator" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_aggregator(const struct tomoyo_acl_head *a,
+				   const struct tomoyo_acl_head *b)
+{
+	const struct tomoyo_aggregator *p1 = container_of(a, typeof(*p1),
+							  head);
+	const struct tomoyo_aggregator *p2 = container_of(b, typeof(*p2),
+							  head);
+	return p1->original_name == p2->original_name &&
+		p1->aggregated_name == p2->aggregated_name;
+}
+
+/**
+ * tomoyo_write_aggregator - Write "struct tomoyo_aggregator" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_aggregator(struct tomoyo_acl_param *param)
+{
+	struct tomoyo_aggregator e = { };
+	int error = param->is_delete ? -ENOENT : -ENOMEM;
+	const char *original_name = tomoyo_read_token(param);
+	const char *aggregated_name = tomoyo_read_token(param);
+	if (!tomoyo_correct_word(original_name) ||
+	    !tomoyo_correct_path(aggregated_name))
+		return -EINVAL;
+	e.original_name = tomoyo_get_name(original_name);
+	e.aggregated_name = tomoyo_get_name(aggregated_name);
+	if (!e.original_name || !e.aggregated_name ||
+	    e.aggregated_name->is_patterned) /* No patterns allowed. */
+		goto out;
+	param->list = &param->ns->policy_list[TOMOYO_ID_AGGREGATOR];
+	error = tomoyo_update_policy(&e.head, sizeof(e), param,
+				     tomoyo_same_aggregator);
+out:
+	tomoyo_put_name(e.original_name);
+	tomoyo_put_name(e.aggregated_name);
+	return error;
+}
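+
+/*
+ * Example (illustrative): "aggregator /usr/bin/cc-\* /usr/bin/cc"
+ * causes any program whose pathname matches /usr/bin/cc-\* to be
+ * treated as /usr/bin/cc when calculating domain transitions.
+ */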
+
+/**
+ * tomoyo_find_namespace - Find specified namespace.
+ *
+ * @name: Name of namespace to find.
+ * @len:  Length of @name.
+ *
+ * Returns pointer to "struct tomoyo_policy_namespace" if found,
+ * NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static struct tomoyo_policy_namespace *tomoyo_find_namespace
+(const char *name, const unsigned int len)
+{
+	struct tomoyo_policy_namespace *ns;
+	list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+		if (strncmp(name, ns->name, len) ||
+		    (name[len] && name[len] != ' '))
+			continue;
+		return ns;
+	}
+	return NULL;
+}
+
+/**
+ * tomoyo_assign_namespace - Create a new namespace.
+ *
+ * @domainname: Name of namespace to create.
+ *
+ * Returns pointer to "struct tomoyo_policy_namespace" on success,
+ * NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+struct tomoyo_policy_namespace *tomoyo_assign_namespace(const char *domainname)
+{
+	struct tomoyo_policy_namespace *ptr;
+	struct tomoyo_policy_namespace *entry;
+	const char *cp = domainname;
+	unsigned int len = 0;
+	while (*cp && *cp++ != ' ')
+		len++;
+	ptr = tomoyo_find_namespace(domainname, len);
+	if (ptr)
+		return ptr;
+	if (len >= TOMOYO_EXEC_TMPSIZE - 10 || !tomoyo_domain_def(domainname))
+		return NULL;
+	entry = kzalloc(sizeof(*entry) + len + 1, GFP_NOFS);
+	if (!entry)
+		return NULL;
+	if (mutex_lock_interruptible(&tomoyo_policy_lock))
+		goto out;
+	ptr = tomoyo_find_namespace(domainname, len);
+	if (!ptr && tomoyo_memory_ok(entry)) {
+		char *name = (char *) (entry + 1);
+		ptr = entry;
+		memmove(name, domainname, len);
+		name[len] = '\0';
+		entry->name = name;
+		tomoyo_init_policy_namespace(entry);
+		entry = NULL;
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+out:
+	kfree(entry);
+	return ptr;
+}
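+
+/*
+ * Note: a namespace name is the leading "<...>" token of a domainname
+ * (e.g. "<kernel>" for "<kernel> /usr/sbin/sshd"), which is why the
+ * length is measured up to the first ' ' above.
+ */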
+
+/**
+ * tomoyo_namespace_jump - Check for namespace jump.
+ *
+ * @domainname: Name of domain.
+ *
+ * Returns true if namespace differs, false otherwise.
+ */
+static bool tomoyo_namespace_jump(const char *domainname)
+{
+	const char *namespace = tomoyo_current_namespace()->name;
+	const int len = strlen(namespace);
+	return strncmp(domainname, namespace, len) ||
+		(domainname[len] && domainname[len] != ' ');
+}
+
+/**
+ * tomoyo_assign_domain - Create a domain or a namespace.
+ *
+ * @domainname: The name of domain.
+ * @transit:    True if the caller transits to the found or created domain.
+ *
+ * Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
+						const bool transit)
+{
+	struct tomoyo_domain_info e = { };
+	struct tomoyo_domain_info *entry = tomoyo_find_domain(domainname);
+	bool created = false;
+	if (entry) {
+		if (transit) {
+			/*
+			 * Since namespaces are created at runtime, profiles
+			 * may not have been created by the time the process
+			 * transits to that domain. Do not perform the domain
+			 * transition if the profile for that domain has not
+			 * been created yet.
+			 */
+			if (tomoyo_policy_loaded &&
+			    !entry->ns->profile_ptr[entry->profile])
+				return NULL;
+		}
+		return entry;
+	}
+	/* Requested domain does not exist. */
+	/* Don't create requested domain if domainname is invalid. */
+	if (strlen(domainname) >= TOMOYO_EXEC_TMPSIZE - 10 ||
+	    !tomoyo_correct_domain(domainname))
+		return NULL;
+	/*
+	 * Since definition of profiles and acl_groups may differ across
+	 * namespaces, do not inherit "use_profile" and "use_group" settings
+	 * by automatically creating requested domain upon domain transition.
+	 */
+	if (transit && tomoyo_namespace_jump(domainname))
+		return NULL;
+	e.ns = tomoyo_assign_namespace(domainname);
+	if (!e.ns)
+		return NULL;
+	/*
+	 * "use_profile" and "use_group" settings for automatically created
+	 * domains are inherited from current domain. These are 0 for manually
+	 * created domains.
+	 */
+	if (transit) {
+		const struct tomoyo_domain_info *domain = tomoyo_domain();
+		e.profile = domain->profile;
+		e.group = domain->group;
+	}
+	e.domainname = tomoyo_get_name(domainname);
+	if (!e.domainname)
+		return NULL;
+	if (mutex_lock_interruptible(&tomoyo_policy_lock))
+		goto out;
+	entry = tomoyo_find_domain(domainname);
+	if (!entry) {
+		entry = tomoyo_commit_ok(&e, sizeof(e));
+		if (entry) {
+			INIT_LIST_HEAD(&entry->acl_info_list);
+			list_add_tail_rcu(&entry->list, &tomoyo_domain_list);
+			created = true;
+		}
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+out:
+	tomoyo_put_name(e.domainname);
+	if (entry && transit) {
+		if (created) {
+			struct tomoyo_request_info r;
+			tomoyo_init_request_info(&r, entry,
+						 TOMOYO_MAC_FILE_EXECUTE);
+			r.granted = false;
+			tomoyo_write_log(&r, "use_profile %u\n",
+					 entry->profile);
+			tomoyo_write_log(&r, "use_group %u\n", entry->group);
+			tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+		}
+	}
+	return entry;
+}
+
+/**
+ * tomoyo_environ - Check permission for environment variable names.
+ *
+ * @ee: Pointer to "struct tomoyo_execve".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_environ(struct tomoyo_execve *ee)
+{
+	struct tomoyo_request_info *r = &ee->r;
+	struct linux_binprm *bprm = ee->bprm;
+	/* env_page.data is allocated by tomoyo_dump_page(). */
+	struct tomoyo_page_dump env_page = { };
+	char *arg_ptr; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+	int arg_len = 0;
+	unsigned long pos = bprm->p;
+	int offset = pos % PAGE_SIZE;
+	int argv_count = bprm->argc;
+	int envp_count = bprm->envc;
+	int error = -ENOMEM;
+
+	ee->r.type = TOMOYO_MAC_ENVIRON;
+	ee->r.profile = r->domain->profile;
+	ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile,
+				     TOMOYO_MAC_ENVIRON);
+	if (!r->mode || !envp_count)
+		return 0;
+	arg_ptr = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+	if (!arg_ptr)
+		goto out;
+	while (error == -ENOMEM) {
+		if (!tomoyo_dump_page(bprm, pos, &env_page))
+			goto out;
+		pos += PAGE_SIZE - offset;
+		/* Skip argv[] strings so that envp[] strings come next. */
+		while (argv_count && offset < PAGE_SIZE) {
+			if (!env_page.data[offset++])
+				argv_count--;
+		}
+		if (argv_count) {
+			offset = 0;
+			continue;
+		}
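+		/*
+		 * Copy one environment string into arg_ptr up to the
+		 * '=' separator, doubling '\\' and encoding bytes
+		 * outside the printable range as \ooo octal so that the
+		 * name can be matched against policy patterns.
+		 */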
+		while (offset < PAGE_SIZE) {
+			const unsigned char c = env_page.data[offset++];
+
+			if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+				if (c == '=') {
+					arg_ptr[arg_len++] = '\0';
+				} else if (c == '\\') {
+					arg_ptr[arg_len++] = '\\';
+					arg_ptr[arg_len++] = '\\';
+				} else if (c > ' ' && c < 127) {
+					arg_ptr[arg_len++] = c;
+				} else {
+					arg_ptr[arg_len++] = '\\';
+					arg_ptr[arg_len++] = (c >> 6) + '0';
+					arg_ptr[arg_len++]
+						= ((c >> 3) & 7) + '0';
+					arg_ptr[arg_len++] = (c & 7) + '0';
+				}
+			} else {
+				arg_ptr[arg_len] = '\0';
+			}
+			if (c)
+				continue;
+			if (tomoyo_env_perm(r, arg_ptr)) {
+				error = -EPERM;
+				break;
+			}
+			if (!--envp_count) {
+				error = 0;
+				break;
+			}
+			arg_len = 0;
+		}
+		offset = 0;
+	}
+out:
+	if (r->mode != TOMOYO_CONFIG_ENFORCING)
+		error = 0;
+	kfree(env_page.data);
+	kfree(arg_ptr);
+	return error;
+}
+
+/**
+ * tomoyo_find_next_domain - Find a domain.
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_find_next_domain(struct linux_binprm *bprm)
+{
+	struct tomoyo_domain_info *old_domain = tomoyo_domain();
+	struct tomoyo_domain_info *domain = NULL;
+	const char *original_name = bprm->filename;
+	int retval = -ENOMEM;
+	bool reject_on_transition_failure = false;
+	const struct tomoyo_path_info *candidate;
+	struct tomoyo_path_info exename;
+	struct tomoyo_execve *ee = kzalloc(sizeof(*ee), GFP_NOFS);
+
+	if (!ee)
+		return -ENOMEM;
+	ee->tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+	if (!ee->tmp) {
+		kfree(ee);
+		return -ENOMEM;
+	}
+	/* ee->dump.data is allocated by tomoyo_dump_page(). */
+	tomoyo_init_request_info(&ee->r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+	ee->r.ee = ee;
+	ee->bprm = bprm;
+	ee->r.obj = &ee->obj;
+	ee->obj.path1 = bprm->file->f_path;
+	/* Get symlink's pathname of program. */
+	retval = -ENOENT;
+	exename.name = tomoyo_realpath_nofollow(original_name);
+	if (!exename.name)
+		goto out;
+	tomoyo_fill_path_info(&exename);
+retry:
+	/* Check 'aggregator' directive. */
+	{
+		struct tomoyo_aggregator *ptr;
+		struct list_head *list =
+			&old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR];
+		candidate = &exename;
+		list_for_each_entry_rcu(ptr, list, head.list) {
+			if (ptr->head.is_deleted ||
+			    !tomoyo_path_matches_pattern(&exename,
+							 ptr->original_name))
+				continue;
+			candidate = ptr->aggregated_name;
+			break;
+		}
+	}
+
+	/* Check execute permission. */
+	retval = tomoyo_execute_permission(&ee->r, candidate);
+	if (retval == TOMOYO_RETRY_REQUEST)
+		goto retry;
+	if (retval < 0)
+		goto out;
+	/*
+	 * To be able to specify domainnames with wildcards, use the
+	 * pathname specified in the policy (which may contain
+	 * wildcard) rather than the pathname passed to execve()
+	 * (which never contains wildcard).
+	 */
+	if (ee->r.param.path.matched_path)
+		candidate = ee->r.param.path.matched_path;
+
+	/*
+	 * Check for domain transition preference if "file execute" matched.
+	 * If preference is given, make do_execve() fail if domain transition
+	 * has failed, for domain transition preference should be used with
+	 * destination domain defined.
+	 */
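+	/*
+	 * The preference is one of the keywords "keep", "child", "reset",
+	 * "initialize" or "parent", a fully-qualified domainname starting
+	 * with '<', or a component to append to the current domainname.
+	 */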
+	if (ee->transition) {
+		const char *domainname = ee->transition->name;
+		reject_on_transition_failure = true;
+		if (!strcmp(domainname, "keep"))
+			goto force_keep_domain;
+		if (!strcmp(domainname, "child"))
+			goto force_child_domain;
+		if (!strcmp(domainname, "reset"))
+			goto force_reset_domain;
+		if (!strcmp(domainname, "initialize"))
+			goto force_initialize_domain;
+		if (!strcmp(domainname, "parent")) {
+			char *cp;
+			strncpy(ee->tmp, old_domain->domainname->name,
+				TOMOYO_EXEC_TMPSIZE - 1);
+			cp = strrchr(ee->tmp, ' ');
+			if (cp)
+				*cp = '\0';
+		} else if (*domainname == '<')
+			strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1);
+		else
+			snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+				 old_domain->domainname->name, domainname);
+		goto force_jump_domain;
+	}
+	/*
+	 * No domain transition preference specified.
+	 * Calculate domain to transit to.
+	 */
+	switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname,
+				       candidate)) {
+	case TOMOYO_TRANSITION_CONTROL_RESET:
+force_reset_domain:
+		/* Transit to the root of specified namespace. */
+		snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>",
+			 candidate->name);
+		/*
+		 * Make do_execve() fail if domain transition across namespaces
+		 * has failed.
+		 */
+		reject_on_transition_failure = true;
+		break;
+	case TOMOYO_TRANSITION_CONTROL_INITIALIZE:
+force_initialize_domain:
+		/* Transit to the child of current namespace's root. */
+		snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+			 old_domain->ns->name, candidate->name);
+		break;
+	case TOMOYO_TRANSITION_CONTROL_KEEP:
+force_keep_domain:
+		/* Keep current domain. */
+		domain = old_domain;
+		break;
+	default:
+		if (old_domain == &tomoyo_kernel_domain &&
+		    !tomoyo_policy_loaded) {
+			/*
+			 * There is no need to transit from the kernel domain
+			 * before starting /sbin/init. But do transit from the
+			 * kernel domain when executing initializers, because
+			 * they might start before /sbin/init.
+			 */
+			domain = old_domain;
+			break;
+		}
+force_child_domain:
+		/* Normal domain transition. */
+		snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+			 old_domain->domainname->name, candidate->name);
+		break;
+	}
+force_jump_domain:
+	if (!domain)
+		domain = tomoyo_assign_domain(ee->tmp, true);
+	if (domain)
+		retval = 0;
+	else if (reject_on_transition_failure) {
+		printk(KERN_WARNING "ERROR: Domain '%s' not ready.\n",
+		       ee->tmp);
+		retval = -ENOMEM;
+	} else if (ee->r.mode == TOMOYO_CONFIG_ENFORCING)
+		retval = -ENOMEM;
+	else {
+		retval = 0;
+		if (!old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED]) {
+			old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED] = true;
+			ee->r.granted = false;
+			tomoyo_write_log(&ee->r, "%s", tomoyo_dif
+					 [TOMOYO_DIF_TRANSITION_FAILED]);
+			printk(KERN_WARNING
+			       "ERROR: Domain '%s' not defined.\n", ee->tmp);
+		}
+	}
+ out:
+	if (!domain)
+		domain = old_domain;
+	/* Update reference count on "struct tomoyo_domain_info". */
+	atomic_inc(&domain->users);
+	bprm->cred->security = domain;
+	kfree(exename.name);
+	if (!retval) {
+		ee->r.domain = domain;
+		retval = tomoyo_environ(ee);
+	}
+	kfree(ee->tmp);
+	kfree(ee->dump.data);
+	kfree(ee);
+	return retval;
+}
+
+/**
+ * tomoyo_dump_page - Dump a page to buffer.
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ * @pos:  Location to dump.
+ * @dump: Pointer to "struct tomoyo_page_dump".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
+		      struct tomoyo_page_dump *dump)
+{
+	struct page *page;
+
+	/* dump->data is released by tomoyo_find_next_domain(). */
+	if (!dump->data) {
+		dump->data = kzalloc(PAGE_SIZE, GFP_NOFS);
+		if (!dump->data)
+			return false;
+	}
+	/* Same as get_arg_page(bprm, pos, 0) in fs/exec.c */
+#ifdef CONFIG_MMU
+	if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0)
+		return false;
+#else
+	page = bprm->page[pos / PAGE_SIZE];
+#endif
+	if (page != dump->page) {
+		const unsigned int offset = pos % PAGE_SIZE;
+		/*
+		 * Maybe kmap()/kunmap() should be used here.
+		 * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic().
+		 * So we do the same.
+		 */
+		char *kaddr = kmap_atomic(page);
+
+		dump->page = page;
+		memcpy(dump->data + offset, kaddr + offset,
+		       PAGE_SIZE - offset);
+		kunmap_atomic(kaddr);
+	}
+	/* Same as put_arg_page(page) in fs/exec.c */
+#ifdef CONFIG_MMU
+	put_page(page);
+#endif
+	return true;
+}
diff --git a/security/tomoyo/environ.c b/security/tomoyo/environ.c
new file mode 100644
index 0000000..ad4c6e1
--- /dev/null
+++ b/security/tomoyo/environ.c
@@ -0,0 +1,122 @@
+/*
+ * security/tomoyo/environ.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+/**
+ * tomoyo_check_env_acl - Check permission for environment variable's name.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_env_acl(struct tomoyo_request_info *r,
+				 const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_env_acl *acl =
+		container_of(ptr, typeof(*acl), head);
+
+	return tomoyo_path_matches_pattern(r->param.environ.name, acl->env);
+}
+
+/**
+ * tomoyo_audit_env_log - Audit environment variable name log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_env_log(struct tomoyo_request_info *r)
+{
+	return tomoyo_supervisor(r, "misc env %s\n",
+				 r->param.environ.name->name);
+}
+
+/**
+ * tomoyo_env_perm - Check permission for environment variable's name.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @env: The name of environment variable.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env)
+{
+	struct tomoyo_path_info environ;
+	int error;
+
+	if (!env || !*env)
+		return 0;
+	environ.name = env;
+	tomoyo_fill_path_info(&environ);
+	r->param_type = TOMOYO_TYPE_ENV_ACL;
+	r->param.environ.name = &environ;
+	do {
+		tomoyo_check_acl(r, tomoyo_check_env_acl);
+		error = tomoyo_audit_env_log(r);
+	} while (error == TOMOYO_RETRY_REQUEST);
+	return error;
+}
+
+/**
+ * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a,
+				const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head);
+	const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head);
+
+	return p1->env == p2->env;
+}
+
+/**
+ * tomoyo_write_env - Write "struct tomoyo_env_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_env(struct tomoyo_acl_param *param)
+{
+	struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL };
+	int error = -ENOMEM;
+	const char *data = tomoyo_read_token(param);
+
+	if (!tomoyo_correct_word(data) || strchr(data, '='))
+		return -EINVAL;
+	e.env = tomoyo_get_name(data);
+	if (!e.env)
+		return error;
+	error = tomoyo_update_domain(&e.head, sizeof(e), param,
+				     tomoyo_same_env_acl, NULL);
+	tomoyo_put_name(e.env);
+	return error;
+}
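+
+/*
+ * Example (illustrative): a domain policy line
+ *   misc env LD_PRELOAD
+ * reaches this function via tomoyo_write_misc() below with
+ * param->data pointing at "LD_PRELOAD", adding an ACL entry that
+ * permits that environment variable name.
+ */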
+
+/**
+ * tomoyo_write_misc - Update environment variable list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_misc(struct tomoyo_acl_param *param)
+{
+	if (tomoyo_str_starts(&param->data, "env "))
+		return tomoyo_write_env(param);
+	return -EINVAL;
+}
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
new file mode 100644
index 0000000..2367b10
--- /dev/null
+++ b/security/tomoyo/file.c
@@ -0,0 +1,1024 @@
+/*
+ * security/tomoyo/file.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/*
+ * Mapping table from "enum tomoyo_path_acl_index" to "enum tomoyo_mac_index".
+ */
+static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = {
+	[TOMOYO_TYPE_EXECUTE]    = TOMOYO_MAC_FILE_EXECUTE,
+	[TOMOYO_TYPE_READ]       = TOMOYO_MAC_FILE_OPEN,
+	[TOMOYO_TYPE_WRITE]      = TOMOYO_MAC_FILE_OPEN,
+	[TOMOYO_TYPE_APPEND]     = TOMOYO_MAC_FILE_OPEN,
+	[TOMOYO_TYPE_UNLINK]     = TOMOYO_MAC_FILE_UNLINK,
+	[TOMOYO_TYPE_GETATTR]    = TOMOYO_MAC_FILE_GETATTR,
+	[TOMOYO_TYPE_RMDIR]      = TOMOYO_MAC_FILE_RMDIR,
+	[TOMOYO_TYPE_TRUNCATE]   = TOMOYO_MAC_FILE_TRUNCATE,
+	[TOMOYO_TYPE_SYMLINK]    = TOMOYO_MAC_FILE_SYMLINK,
+	[TOMOYO_TYPE_CHROOT]     = TOMOYO_MAC_FILE_CHROOT,
+	[TOMOYO_TYPE_UMOUNT]     = TOMOYO_MAC_FILE_UMOUNT,
+};
+
+/*
+ * Mapping table from "enum tomoyo_mkdev_acl_index" to "enum tomoyo_mac_index".
+ */
+const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = {
+	[TOMOYO_TYPE_MKBLOCK] = TOMOYO_MAC_FILE_MKBLOCK,
+	[TOMOYO_TYPE_MKCHAR]  = TOMOYO_MAC_FILE_MKCHAR,
+};
+
+/*
+ * Mapping table from "enum tomoyo_path2_acl_index" to "enum tomoyo_mac_index".
+ */
+const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = {
+	[TOMOYO_TYPE_LINK]       = TOMOYO_MAC_FILE_LINK,
+	[TOMOYO_TYPE_RENAME]     = TOMOYO_MAC_FILE_RENAME,
+	[TOMOYO_TYPE_PIVOT_ROOT] = TOMOYO_MAC_FILE_PIVOT_ROOT,
+};
+
+/*
+ * Mapping table from "enum tomoyo_path_number_acl_index" to
+ * "enum tomoyo_mac_index".
+ */
+const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = {
+	[TOMOYO_TYPE_CREATE] = TOMOYO_MAC_FILE_CREATE,
+	[TOMOYO_TYPE_MKDIR]  = TOMOYO_MAC_FILE_MKDIR,
+	[TOMOYO_TYPE_MKFIFO] = TOMOYO_MAC_FILE_MKFIFO,
+	[TOMOYO_TYPE_MKSOCK] = TOMOYO_MAC_FILE_MKSOCK,
+	[TOMOYO_TYPE_IOCTL]  = TOMOYO_MAC_FILE_IOCTL,
+	[TOMOYO_TYPE_CHMOD]  = TOMOYO_MAC_FILE_CHMOD,
+	[TOMOYO_TYPE_CHOWN]  = TOMOYO_MAC_FILE_CHOWN,
+	[TOMOYO_TYPE_CHGRP]  = TOMOYO_MAC_FILE_CHGRP,
+};
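+
+/*
+ * The tables above translate per-operation ACL indices into
+ * "enum tomoyo_mac_index" values so that the profile's mode for each
+ * request can be looked up via tomoyo_get_mode().
+ */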
+
+/**
+ * tomoyo_put_name_union - Drop reference on "struct tomoyo_name_union".
+ *
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_put_name_union(struct tomoyo_name_union *ptr)
+{
+	tomoyo_put_group(ptr->group);
+	tomoyo_put_name(ptr->filename);
+}
+
+/**
+ * tomoyo_compare_name_union - Check whether a name matches "struct tomoyo_name_union" or not.
+ *
+ * @name: Pointer to "struct tomoyo_path_info".
+ * @ptr:  Pointer to "struct tomoyo_name_union".
+ *
+ * Returns "struct tomoyo_path_info" if @name matches @ptr, NULL otherwise.
+ */
+const struct tomoyo_path_info *
+tomoyo_compare_name_union(const struct tomoyo_path_info *name,
+			  const struct tomoyo_name_union *ptr)
+{
+	if (ptr->group)
+		return tomoyo_path_matches_group(name, ptr->group);
+	if (tomoyo_path_matches_pattern(name, ptr->filename))
+		return ptr->filename;
+	return NULL;
+}
+
+/**
+ * tomoyo_put_number_union - Drop reference on "struct tomoyo_number_union".
+ *
+ * @ptr: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_put_number_union(struct tomoyo_number_union *ptr)
+{
+	tomoyo_put_group(ptr->group);
+}
+
+/**
+ * tomoyo_compare_number_union - Check whether a value matches "struct tomoyo_number_union" or not.
+ *
+ * @value: Number to check.
+ * @ptr:   Pointer to "struct tomoyo_number_union".
+ *
+ * Returns true if @value matches @ptr, false otherwise.
+ */
+bool tomoyo_compare_number_union(const unsigned long value,
+				 const struct tomoyo_number_union *ptr)
+{
+	if (ptr->group)
+		return tomoyo_number_matches_group(value, value, ptr->group);
+	return value >= ptr->values[0] && value <= ptr->values[1];
+}
+
+/**
+ * tomoyo_add_slash - Add trailing '/' if needed.
+ *
+ * @buf: Pointer to "struct tomoyo_path_info".
+ *
+ * Returns nothing.
+ *
+ * @buf must be generated by tomoyo_encode() because this function does not
+ * allocate memory for adding '/'.
+ */
+static void tomoyo_add_slash(struct tomoyo_path_info *buf)
+{
+	if (buf->is_dir)
+		return;
+	/*
+	 * This is OK because tomoyo_encode() reserves space for appending "/".
+	 */
+	strcat((char *) buf->name, "/");
+	tomoyo_fill_path_info(buf);
+}
+
+/**
+ * tomoyo_get_realpath - Get realpath.
+ *
+ * @buf:  Pointer to "struct tomoyo_path_info".
+ * @path: Pointer to "struct path".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, const struct path *path)
+{
+	buf->name = tomoyo_realpath_from_path(path);
+	if (buf->name) {
+		tomoyo_fill_path_info(buf);
+		return true;
+	}
+	return false;
+}
+
+/**
+ * tomoyo_audit_path_log - Audit path request log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_path_log(struct tomoyo_request_info *r)
+{
+	return tomoyo_supervisor(r, "file %s %s\n", tomoyo_path_keyword
+				 [r->param.path.operation],
+				 r->param.path.filename->name);
+}
+
+/**
+ * tomoyo_audit_path2_log - Audit path/path request log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_path2_log(struct tomoyo_request_info *r)
+{
+	return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords
+				 [tomoyo_pp2mac[r->param.path2.operation]],
+				 r->param.path2.filename1->name,
+				 r->param.path2.filename2->name);
+}
+
+/**
+ * tomoyo_audit_mkdev_log - Audit path/number/number/number request log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r)
+{
+	return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n",
+				 tomoyo_mac_keywords
+				 [tomoyo_pnnn2mac[r->param.mkdev.operation]],
+				 r->param.mkdev.filename->name,
+				 r->param.mkdev.mode, r->param.mkdev.major,
+				 r->param.mkdev.minor);
+}
+
+/**
+ * tomoyo_audit_path_number_log - Audit path/number request log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r)
+{
+	const u8 type = r->param.path_number.operation;
+	u8 radix;
+	char buffer[64];
+	switch (type) {
+	case TOMOYO_TYPE_CREATE:
+	case TOMOYO_TYPE_MKDIR:
+	case TOMOYO_TYPE_MKFIFO:
+	case TOMOYO_TYPE_MKSOCK:
+	case TOMOYO_TYPE_CHMOD:
+		radix = TOMOYO_VALUE_TYPE_OCTAL;
+		break;
+	case TOMOYO_TYPE_IOCTL:
+		radix = TOMOYO_VALUE_TYPE_HEXADECIMAL;
+		break;
+	default:
+		radix = TOMOYO_VALUE_TYPE_DECIMAL;
+		break;
+	}
+	tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number,
+			   radix);
+	return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords
+				 [tomoyo_pn2mac[type]],
+				 r->param.path_number.filename->name, buffer);
+}
+
+/**
+ * tomoyo_check_path_acl - Check permission for path operation.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ *
+ * To be able to use wildcard for domain transition, this function sets
+ * matching entry on success. Since the caller holds tomoyo_read_lock(),
+ * it is safe to set matching entry.
+ */
+static bool tomoyo_check_path_acl(struct tomoyo_request_info *r,
+				  const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_path_acl *acl = container_of(ptr, typeof(*acl),
+							 head);
+	if (acl->perm & (1 << r->param.path.operation)) {
+		r->param.path.matched_path =
+			tomoyo_compare_name_union(r->param.path.filename,
+						  &acl->name);
+		return r->param.path.matched_path != NULL;
+	}
+	return false;
+}
+
+/**
+ * tomoyo_check_path_number_acl - Check permission for path number operation.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r,
+					 const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_path_number_acl *acl =
+		container_of(ptr, typeof(*acl), head);
+	return (acl->perm & (1 << r->param.path_number.operation)) &&
+		tomoyo_compare_number_union(r->param.path_number.number,
+					    &acl->number) &&
+		tomoyo_compare_name_union(r->param.path_number.filename,
+					  &acl->name);
+}
+
+/**
+ * tomoyo_check_path2_acl - Check permission for path/path operation.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r,
+				   const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_path2_acl *acl =
+		container_of(ptr, typeof(*acl), head);
+	return (acl->perm & (1 << r->param.path2.operation)) &&
+		tomoyo_compare_name_union(r->param.path2.filename1, &acl->name1)
+		&& tomoyo_compare_name_union(r->param.path2.filename2,
+					     &acl->name2);
+}
+
+/**
+ * tomoyo_check_mkdev_acl - Check permission for path/number/number/number operation.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r,
+				   const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_mkdev_acl *acl =
+		container_of(ptr, typeof(*acl), head);
+	return (acl->perm & (1 << r->param.mkdev.operation)) &&
+		tomoyo_compare_number_union(r->param.mkdev.mode,
+					    &acl->mode) &&
+		tomoyo_compare_number_union(r->param.mkdev.major,
+					    &acl->major) &&
+		tomoyo_compare_number_union(r->param.mkdev.minor,
+					    &acl->minor) &&
+		tomoyo_compare_name_union(r->param.mkdev.filename,
+					  &acl->name);
+}
+
+/**
+ * tomoyo_same_path_acl - Check for duplicated "struct tomoyo_path_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a,
+				 const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head);
+	const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head);
+	return tomoyo_same_name_union(&p1->name, &p2->name);
+}
+
+/**
+ * tomoyo_merge_path_acl - Merge duplicated "struct tomoyo_path_acl" entry.
+ *
+ * @a:         Pointer to "struct tomoyo_acl_info".
+ * @b:         Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a,
+				  struct tomoyo_acl_info *b,
+				  const bool is_delete)
+{
+	u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head)
+		->perm;
+	u16 perm = *a_perm;
+	const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm;
+	if (is_delete)
+		perm &= ~b_perm;
+	else
+		perm |= b_perm;
+	*a_perm = perm;
+	return !perm;
+}
+
+/**
+ * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list.
+ *
+ * @perm:  Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_path_acl(const u16 perm,
+				  struct tomoyo_acl_param *param)
+{
+	struct tomoyo_path_acl e = {
+		.head.type = TOMOYO_TYPE_PATH_ACL,
+		.perm = perm
+	};
+	int error;
+	if (!tomoyo_parse_name_union(param, &e.name))
+		error = -EINVAL;
+	else
+		error = tomoyo_update_domain(&e.head, sizeof(e), param,
+					     tomoyo_same_path_acl,
+					     tomoyo_merge_path_acl);
+	tomoyo_put_name_union(&e.name);
+	return error;
+}
+
+/**
+ * tomoyo_same_mkdev_acl - Check for duplicated "struct tomoyo_mkdev_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_mkdev_acl(const struct tomoyo_acl_info *a,
+					 const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head);
+	const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head);
+	return tomoyo_same_name_union(&p1->name, &p2->name) &&
+		tomoyo_same_number_union(&p1->mode, &p2->mode) &&
+		tomoyo_same_number_union(&p1->major, &p2->major) &&
+		tomoyo_same_number_union(&p1->minor, &p2->minor);
+}
+
+/**
+ * tomoyo_merge_mkdev_acl - Merge duplicated "struct tomoyo_mkdev_acl" entry.
+ *
+ * @a:         Pointer to "struct tomoyo_acl_info".
+ * @b:         Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a,
+				   struct tomoyo_acl_info *b,
+				   const bool is_delete)
+{
+	u8 *const a_perm = &container_of(a, struct tomoyo_mkdev_acl,
+					 head)->perm;
+	u8 perm = *a_perm;
+	const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head)
+		->perm;
+	if (is_delete)
+		perm &= ~b_perm;
+	else
+		perm |= b_perm;
+	*a_perm = perm;
+	return !perm;
+}
+
+/**
+ * tomoyo_update_mkdev_acl - Update "struct tomoyo_mkdev_acl" list.
+ *
+ * @perm:  Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_mkdev_acl(const u8 perm,
+				   struct tomoyo_acl_param *param)
+{
+	struct tomoyo_mkdev_acl e = {
+		.head.type = TOMOYO_TYPE_MKDEV_ACL,
+		.perm = perm
+	};
+	int error;
+	if (!tomoyo_parse_name_union(param, &e.name) ||
+	    !tomoyo_parse_number_union(param, &e.mode) ||
+	    !tomoyo_parse_number_union(param, &e.major) ||
+	    !tomoyo_parse_number_union(param, &e.minor))
+		error = -EINVAL;
+	else
+		error = tomoyo_update_domain(&e.head, sizeof(e), param,
+					     tomoyo_same_mkdev_acl,
+					     tomoyo_merge_mkdev_acl);
+	tomoyo_put_name_union(&e.name);
+	tomoyo_put_number_union(&e.mode);
+	tomoyo_put_number_union(&e.major);
+	tomoyo_put_number_union(&e.minor);
+	return error;
+}
+
+/**
+ * tomoyo_same_path2_acl - Check for duplicated "struct tomoyo_path2_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_path2_acl(const struct tomoyo_acl_info *a,
+				  const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head);
+	const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head);
+	return tomoyo_same_name_union(&p1->name1, &p2->name1) &&
+		tomoyo_same_name_union(&p1->name2, &p2->name2);
+}
+
+/**
+ * tomoyo_merge_path2_acl - Merge duplicated "struct tomoyo_path2_acl" entry.
+ *
+ * @a:         Pointer to "struct tomoyo_acl_info".
+ * @b:         Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a,
+				   struct tomoyo_acl_info *b,
+				   const bool is_delete)
+{
+	u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head)
+		->perm;
+	u8 perm = *a_perm;
+	const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm;
+	if (is_delete)
+		perm &= ~b_perm;
+	else
+		perm |= b_perm;
+	*a_perm = perm;
+	return !perm;
+}
+
+/**
+ * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list.
+ *
+ * @perm:  Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_path2_acl(const u8 perm,
+				   struct tomoyo_acl_param *param)
+{
+	struct tomoyo_path2_acl e = {
+		.head.type = TOMOYO_TYPE_PATH2_ACL,
+		.perm = perm
+	};
+	int error;
+	if (!tomoyo_parse_name_union(param, &e.name1) ||
+	    !tomoyo_parse_name_union(param, &e.name2))
+		error = -EINVAL;
+	else
+		error = tomoyo_update_domain(&e.head, sizeof(e), param,
+					     tomoyo_same_path2_acl,
+					     tomoyo_merge_path2_acl);
+	tomoyo_put_name_union(&e.name1);
+	tomoyo_put_name_union(&e.name2);
+	return error;
+}
+
+/**
+ * tomoyo_path_permission - Check permission for single path operation.
+ *
+ * @r:         Pointer to "struct tomoyo_request_info".
+ * @operation: Type of operation.
+ * @filename:  Filename to check.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
+				  const struct tomoyo_path_info *filename)
+{
+	int error;
+
+	r->type = tomoyo_p2mac[operation];
+	r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+	if (r->mode == TOMOYO_CONFIG_DISABLED)
+		return 0;
+	r->param_type = TOMOYO_TYPE_PATH_ACL;
+	r->param.path.filename = filename;
+	r->param.path.operation = operation;
+	do {
+		tomoyo_check_acl(r, tomoyo_check_path_acl);
+		error = tomoyo_audit_path_log(r);
+	} while (error == TOMOYO_RETRY_REQUEST);
+	return error;
+}
+
+/**
+ * tomoyo_execute_permission - Check permission for execute operation.
+ *
+ * @r:         Pointer to "struct tomoyo_request_info".
+ * @filename:  Filename to check.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+			      const struct tomoyo_path_info *filename)
+{
+	/*
+	 * Unlike other permission checks, this check is done regardless of
+	 * profile mode settings in order to check for domain transition
+	 * preference.
+	 */
+	r->type = TOMOYO_MAC_FILE_EXECUTE;
+	r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+	r->param_type = TOMOYO_TYPE_PATH_ACL;
+	r->param.path.filename = filename;
+	r->param.path.operation = TOMOYO_TYPE_EXECUTE;
+	tomoyo_check_acl(r, tomoyo_check_path_acl);
+	r->ee->transition = r->matched_acl && r->matched_acl->cond ?
+		r->matched_acl->cond->transit : NULL;
+	if (r->mode != TOMOYO_CONFIG_DISABLED)
+		return tomoyo_audit_path_log(r);
+	return 0;
+}
+
+/**
+ * tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a,
+					const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_path_number_acl *p1 = container_of(a, typeof(*p1),
+							       head);
+	const struct tomoyo_path_number_acl *p2 = container_of(b, typeof(*p2),
+							       head);
+	return tomoyo_same_name_union(&p1->name, &p2->name) &&
+		tomoyo_same_number_union(&p1->number, &p2->number);
+}
+
+/**
+ * tomoyo_merge_path_number_acl - Merge duplicated "struct tomoyo_path_number_acl" entry.
+ *
+ * @a:         Pointer to "struct tomoyo_acl_info".
+ * @b:         Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a,
+					 struct tomoyo_acl_info *b,
+					 const bool is_delete)
+{
+	u8 * const a_perm = &container_of(a, struct tomoyo_path_number_acl,
+					  head)->perm;
+	u8 perm = *a_perm;
+	const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head)
+		->perm;
+	if (is_delete)
+		perm &= ~b_perm;
+	else
+		perm |= b_perm;
+	*a_perm = perm;
+	return !perm;
+}
+
+/**
+ * tomoyo_update_path_number_acl - Update "struct tomoyo_path_number_acl" list.
+ *
+ * @perm:  Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_path_number_acl(const u8 perm,
+					 struct tomoyo_acl_param *param)
+{
+	struct tomoyo_path_number_acl e = {
+		.head.type = TOMOYO_TYPE_PATH_NUMBER_ACL,
+		.perm = perm
+	};
+	int error;
+	if (!tomoyo_parse_name_union(param, &e.name) ||
+	    !tomoyo_parse_number_union(param, &e.number))
+		error = -EINVAL;
+	else
+		error = tomoyo_update_domain(&e.head, sizeof(e), param,
+					     tomoyo_same_path_number_acl,
+					     tomoyo_merge_path_number_acl);
+	tomoyo_put_name_union(&e.name);
+	tomoyo_put_number_union(&e.number);
+	return error;
+}
+
+/**
+ * tomoyo_path_number_perm - Check permission for "create", "mkdir", "mkfifo", "mksock", "ioctl", "chmod", "chown", "chgrp".
+ *
+ * @type:   Type of operation.
+ * @path:   Pointer to "struct path".
+ * @number: Number.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_path_number_perm(const u8 type, struct path *path,
+			    unsigned long number)
+{
+	struct tomoyo_request_info r;
+	struct tomoyo_obj_info obj = {
+		.path1 = *path,
+	};
+	int error = -ENOMEM;
+	struct tomoyo_path_info buf;
+	int idx;
+
+	if (tomoyo_init_request_info(&r, NULL, tomoyo_pn2mac[type])
+	    == TOMOYO_CONFIG_DISABLED || !path->dentry)
+		return 0;
+	idx = tomoyo_read_lock();
+	if (!tomoyo_get_realpath(&buf, path))
+		goto out;
+	r.obj = &obj;
+	if (type == TOMOYO_TYPE_MKDIR)
+		tomoyo_add_slash(&buf);
+	r.param_type = TOMOYO_TYPE_PATH_NUMBER_ACL;
+	r.param.path_number.operation = type;
+	r.param.path_number.filename = &buf;
+	r.param.path_number.number = number;
+	do {
+		tomoyo_check_acl(&r, tomoyo_check_path_number_acl);
+		error = tomoyo_audit_path_number_log(&r);
+	} while (error == TOMOYO_RETRY_REQUEST);
+	kfree(buf.name);
+ out:
+	tomoyo_read_unlock(idx);
+	if (r.mode != TOMOYO_CONFIG_ENFORCING)
+		error = 0;
+	return error;
+}
+
+/**
+ * tomoyo_check_open_permission - Check permission for "read" and "write".
+ *
+ * @domain: Pointer to "struct tomoyo_domain_info".
+ * @path:   Pointer to "struct path".
+ * @flag:   Flags for open().
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
+				 struct path *path, const int flag)
+{
+	const u8 acc_mode = ACC_MODE(flag);
+	int error = 0;
+	struct tomoyo_path_info buf;
+	struct tomoyo_request_info r;
+	struct tomoyo_obj_info obj = {
+		.path1 = *path,
+	};
+	int idx;
+
+	buf.name = NULL;
+	r.mode = TOMOYO_CONFIG_DISABLED;
+	idx = tomoyo_read_lock();
+	if (acc_mode &&
+	    tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN)
+	    != TOMOYO_CONFIG_DISABLED) {
+		if (!tomoyo_get_realpath(&buf, path)) {
+			error = -ENOMEM;
+			goto out;
+		}
+		r.obj = &obj;
+		if (acc_mode & MAY_READ)
+			error = tomoyo_path_permission(&r, TOMOYO_TYPE_READ,
+						       &buf);
+		if (!error && (acc_mode & MAY_WRITE))
+			error = tomoyo_path_permission(&r, (flag & O_APPEND) ?
+						       TOMOYO_TYPE_APPEND :
+						       TOMOYO_TYPE_WRITE,
+						       &buf);
+	}
+ out:
+	kfree(buf.name);
+	tomoyo_read_unlock(idx);
+	if (r.mode != TOMOYO_CONFIG_ENFORCING)
+		error = 0;
+	return error;
+}
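+
+/*
+ * Example of the acc_mode handling above: open(path, O_WRONLY|O_APPEND)
+ * gives acc_mode == MAY_WRITE, so only the TOMOYO_TYPE_APPEND check
+ * runs; open(path, O_RDWR) gives MAY_READ|MAY_WRITE, so the
+ * TOMOYO_TYPE_READ check runs first and the write check runs only if
+ * reading was granted.
+ */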
+
+/**
+ * tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "append", "chroot" and "unmount".
+ *
+ * @operation: Type of operation.
+ * @path:      Pointer to "struct path".
+ * @target:    Symlink's target if @operation is TOMOYO_TYPE_SYMLINK,
+ *             NULL otherwise.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_path_perm(const u8 operation, const struct path *path,
+		     const char *target)
+{
+	struct tomoyo_request_info r;
+	struct tomoyo_obj_info obj = {
+		.path1 = *path,
+	};
+	int error;
+	struct tomoyo_path_info buf;
+	bool is_enforce;
+	struct tomoyo_path_info symlink_target;
+	int idx;
+
+	if (tomoyo_init_request_info(&r, NULL, tomoyo_p2mac[operation])
+	    == TOMOYO_CONFIG_DISABLED)
+		return 0;
+	is_enforce = (r.mode == TOMOYO_CONFIG_ENFORCING);
+	error = -ENOMEM;
+	buf.name = NULL;
+	idx = tomoyo_read_lock();
+	if (!tomoyo_get_realpath(&buf, path))
+		goto out;
+	r.obj = &obj;
+	switch (operation) {
+	case TOMOYO_TYPE_RMDIR:
+	case TOMOYO_TYPE_CHROOT:
+		tomoyo_add_slash(&buf);
+		break;
+	case TOMOYO_TYPE_SYMLINK:
+		symlink_target.name = tomoyo_encode(target);
+		if (!symlink_target.name)
+			goto out;
+		tomoyo_fill_path_info(&symlink_target);
+		obj.symlink_target = &symlink_target;
+		break;
+	}
+	error = tomoyo_path_permission(&r, operation, &buf);
+	if (operation == TOMOYO_TYPE_SYMLINK)
+		kfree(symlink_target.name);
+ out:
+	kfree(buf.name);
+	tomoyo_read_unlock(idx);
+	if (!is_enforce)
+		error = 0;
+	return error;
+}
+
+/**
+ * tomoyo_mkdev_perm - Check permission for "mkblock" and "mkchar".
+ *
+ * @operation: Type of operation. (TOMOYO_TYPE_MKCHAR or TOMOYO_TYPE_MKBLOCK)
+ * @path:      Pointer to "struct path".
+ * @mode:      Create mode.
+ * @dev:       Device number.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_mkdev_perm(const u8 operation, struct path *path,
+		      const unsigned int mode, unsigned int dev)
+{
+	struct tomoyo_request_info r;
+	struct tomoyo_obj_info obj = {
+		.path1 = *path,
+	};
+	int error = -ENOMEM;
+	struct tomoyo_path_info buf;
+	int idx;
+
+	if (tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation])
+	    == TOMOYO_CONFIG_DISABLED)
+		return 0;
+	idx = tomoyo_read_lock();
+	error = -ENOMEM;
+	if (tomoyo_get_realpath(&buf, path)) {
+		r.obj = &obj;
+		dev = new_decode_dev(dev);
+		r.param_type = TOMOYO_TYPE_MKDEV_ACL;
+		r.param.mkdev.filename = &buf;
+		r.param.mkdev.operation = operation;
+		r.param.mkdev.mode = mode;
+		r.param.mkdev.major = MAJOR(dev);
+		r.param.mkdev.minor = MINOR(dev);
+		tomoyo_check_acl(&r, tomoyo_check_mkdev_acl);
+		error = tomoyo_audit_mkdev_log(&r);
+		kfree(buf.name);
+	}
+	tomoyo_read_unlock(idx);
+	if (r.mode != TOMOYO_CONFIG_ENFORCING)
+		error = 0;
+	return error;
+}
+
+/**
+ * tomoyo_path2_perm - Check permission for "rename", "link" and "pivot_root".
+ *
+ * @operation: Type of operation.
+ * @path1:     Pointer to "struct path".
+ * @path2:     Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_path2_perm(const u8 operation, struct path *path1,
+		      struct path *path2)
+{
+	int error = -ENOMEM;
+	struct tomoyo_path_info buf1;
+	struct tomoyo_path_info buf2;
+	struct tomoyo_request_info r;
+	struct tomoyo_obj_info obj = {
+		.path1 = *path1,
+		.path2 = *path2,
+	};
+	int idx;
+
+	if (tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation])
+	    == TOMOYO_CONFIG_DISABLED)
+		return 0;
+	buf1.name = NULL;
+	buf2.name = NULL;
+	idx = tomoyo_read_lock();
+	if (!tomoyo_get_realpath(&buf1, path1) ||
+	    !tomoyo_get_realpath(&buf2, path2))
+		goto out;
+	switch (operation) {
+	case TOMOYO_TYPE_RENAME:
+	case TOMOYO_TYPE_LINK:
+		if (!d_is_dir(path1->dentry))
+			break;
+		/* fall through */
+	case TOMOYO_TYPE_PIVOT_ROOT:
+		tomoyo_add_slash(&buf1);
+		tomoyo_add_slash(&buf2);
+		break;
+	}
+	r.obj = &obj;
+	r.param_type = TOMOYO_TYPE_PATH2_ACL;
+	r.param.path2.operation = operation;
+	r.param.path2.filename1 = &buf1;
+	r.param.path2.filename2 = &buf2;
+	do {
+		tomoyo_check_acl(&r, tomoyo_check_path2_acl);
+		error = tomoyo_audit_path2_log(&r);
+	} while (error == TOMOYO_RETRY_REQUEST);
+ out:
+	kfree(buf1.name);
+	kfree(buf2.name);
+	tomoyo_read_unlock(idx);
+	if (r.mode != TOMOYO_CONFIG_ENFORCING)
+		error = 0;
+	return error;
+}
+
+/**
+ * tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
+				  const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
+	const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);
+	return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
+		tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
+		tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
+		tomoyo_same_number_union(&p1->flags, &p2->flags);
+}
+
+/**
+ * tomoyo_update_mount_acl - Write "struct tomoyo_mount_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_mount_acl(struct tomoyo_acl_param *param)
+{
+	struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
+	int error;
+	if (!tomoyo_parse_name_union(param, &e.dev_name) ||
+	    !tomoyo_parse_name_union(param, &e.dir_name) ||
+	    !tomoyo_parse_name_union(param, &e.fs_type) ||
+	    !tomoyo_parse_number_union(param, &e.flags))
+		error = -EINVAL;
+	else
+		error = tomoyo_update_domain(&e.head, sizeof(e), param,
+					     tomoyo_same_mount_acl, NULL);
+	tomoyo_put_name_union(&e.dev_name);
+	tomoyo_put_name_union(&e.dir_name);
+	tomoyo_put_name_union(&e.fs_type);
+	tomoyo_put_number_union(&e.flags);
+	return error;
+}
+
+/**
+ * tomoyo_write_file - Update file related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_file(struct tomoyo_acl_param *param)
+{
+	u16 perm = 0;
+	u8 type;
+	const char *operation = tomoyo_read_token(param);
+	for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++)
+		if (tomoyo_permstr(operation, tomoyo_path_keyword[type]))
+			perm |= 1 << type;
+	if (perm)
+		return tomoyo_update_path_acl(perm, param);
+	for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++)
+		if (tomoyo_permstr(operation,
+				   tomoyo_mac_keywords[tomoyo_pp2mac[type]]))
+			perm |= 1 << type;
+	if (perm)
+		return tomoyo_update_path2_acl(perm, param);
+	for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++)
+		if (tomoyo_permstr(operation,
+				   tomoyo_mac_keywords[tomoyo_pn2mac[type]]))
+			perm |= 1 << type;
+	if (perm)
+		return tomoyo_update_path_number_acl(perm, param);
+	for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++)
+		if (tomoyo_permstr(operation,
+				   tomoyo_mac_keywords[tomoyo_pnnn2mac[type]]))
+			perm |= 1 << type;
+	if (perm)
+		return tomoyo_update_mkdev_acl(perm, param);
+	if (tomoyo_permstr(operation,
+			   tomoyo_mac_keywords[TOMOYO_MAC_FILE_MOUNT]))
+		return tomoyo_update_mount_acl(param);
+	return -EINVAL;
+}
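+
+/*
+ * Example policy lines routed by tomoyo_write_file() (illustrative;
+ * exact keywords come from tomoyo_path_keyword[] and
+ * tomoyo_mac_keywords[]):
+ *
+ *   file read/write /etc/fstab     -> tomoyo_update_path_acl()
+ *   file link /tmp/a /tmp/b        -> tomoyo_update_path2_acl()
+ *   file chmod /dev/null 0666      -> tomoyo_update_path_number_acl()
+ *   file mkchar /dev/null 0666 1 3 -> tomoyo_update_mkdev_acl()
+ *   file mount none /proc/ proc 0  -> tomoyo_update_mount_acl()
+ *
+ * "read/write" sets two bits in @perm in one pass because
+ * tomoyo_permstr() matches each keyword inside a slash-separated list.
+ */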
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
new file mode 100644
index 0000000..986a6a7
--- /dev/null
+++ b/security/tomoyo/gc.c
@@ -0,0 +1,655 @@
+/*
+ * security/tomoyo/gc.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+/**
+ * tomoyo_memory_free - Free memory for elements.
+ *
+ * @ptr:  Pointer to allocated memory.
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_memory_free(void *ptr)
+{
+	tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr);
+	kfree(ptr);
+}
+
+/* The list for "struct tomoyo_io_buffer". */
+static LIST_HEAD(tomoyo_io_buffer_list);
+/* Lock for protecting tomoyo_io_buffer_list. */
+static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
+
+/**
+ * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns true if @element is used by /sys/kernel/security/tomoyo/ users,
+ * false otherwise.
+ */
+static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
+{
+	struct tomoyo_io_buffer *head;
+	bool in_use = false;
+
+	spin_lock(&tomoyo_io_buffer_list_lock);
+	list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
+		head->users++;
+		spin_unlock(&tomoyo_io_buffer_list_lock);
+		mutex_lock(&head->io_sem);
+		if (head->r.domain == element || head->r.group == element ||
+		    head->r.acl == element || &head->w.domain->list == element)
+			in_use = true;
+		mutex_unlock(&head->io_sem);
+		spin_lock(&tomoyo_io_buffer_list_lock);
+		head->users--;
+		if (in_use)
+			break;
+	}
+	spin_unlock(&tomoyo_io_buffer_list_lock);
+	return in_use;
+}
+
+/**
+ * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
+ *
+ * @string: String to check.
+ *
+ * Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
+ * false otherwise.
+ */
+static bool tomoyo_name_used_by_io_buffer(const char *string)
+{
+	struct tomoyo_io_buffer *head;
+	const size_t size = strlen(string) + 1;
+	bool in_use = false;
+
+	spin_lock(&tomoyo_io_buffer_list_lock);
+	list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
+		int i;
+		head->users++;
+		spin_unlock(&tomoyo_io_buffer_list_lock);
+		mutex_lock(&head->io_sem);
+		for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
+			const char *w = head->r.w[i];
+			if (w < string || w > string + size)
+				continue;
+			in_use = true;
+			break;
+		}
+		mutex_unlock(&head->io_sem);
+		spin_lock(&tomoyo_io_buffer_list_lock);
+		head->users--;
+		if (in_use)
+			break;
+	}
+	spin_unlock(&tomoyo_io_buffer_list_lock);
+	return in_use;
+}
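+
+/*
+ * Note on the range check above: head->r.w[i] counts as "in use" when
+ * it points anywhere inside [string, string + size], i.e. into the
+ * about-to-be-freed string including its trailing '\0' and the
+ * one-past-the-end position a read cursor may legitimately hold.
+ */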
+
+/**
+ * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_transition_control(struct list_head *element)
+{
+	struct tomoyo_transition_control *ptr =
+		container_of(element, typeof(*ptr), head.list);
+	tomoyo_put_name(ptr->domainname);
+	tomoyo_put_name(ptr->program);
+}
+
+/**
+ * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_aggregator(struct list_head *element)
+{
+	struct tomoyo_aggregator *ptr =
+		container_of(element, typeof(*ptr), head.list);
+	tomoyo_put_name(ptr->original_name);
+	tomoyo_put_name(ptr->aggregated_name);
+}
+
+/**
+ * tomoyo_del_manager - Delete members in "struct tomoyo_manager".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_manager(struct list_head *element)
+{
+	struct tomoyo_manager *ptr =
+		container_of(element, typeof(*ptr), head.list);
+	tomoyo_put_name(ptr->manager);
+}
+
+/**
+ * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_del_acl(struct list_head *element)
+{
+	struct tomoyo_acl_info *acl =
+		container_of(element, typeof(*acl), list);
+	tomoyo_put_condition(acl->cond);
+	switch (acl->type) {
+	case TOMOYO_TYPE_PATH_ACL:
+		{
+			struct tomoyo_path_acl *entry
+				= container_of(acl, typeof(*entry), head);
+			tomoyo_put_name_union(&entry->name);
+		}
+		break;
+	case TOMOYO_TYPE_PATH2_ACL:
+		{
+			struct tomoyo_path2_acl *entry
+				= container_of(acl, typeof(*entry), head);
+			tomoyo_put_name_union(&entry->name1);
+			tomoyo_put_name_union(&entry->name2);
+		}
+		break;
+	case TOMOYO_TYPE_PATH_NUMBER_ACL:
+		{
+			struct tomoyo_path_number_acl *entry
+				= container_of(acl, typeof(*entry), head);
+			tomoyo_put_name_union(&entry->name);
+			tomoyo_put_number_union(&entry->number);
+		}
+		break;
+	case TOMOYO_TYPE_MKDEV_ACL:
+		{
+			struct tomoyo_mkdev_acl *entry
+				= container_of(acl, typeof(*entry), head);
+			tomoyo_put_name_union(&entry->name);
+			tomoyo_put_number_union(&entry->mode);
+			tomoyo_put_number_union(&entry->major);
+			tomoyo_put_number_union(&entry->minor);
+		}
+		break;
+	case TOMOYO_TYPE_MOUNT_ACL:
+		{
+			struct tomoyo_mount_acl *entry
+				= container_of(acl, typeof(*entry), head);
+			tomoyo_put_name_union(&entry->dev_name);
+			tomoyo_put_name_union(&entry->dir_name);
+			tomoyo_put_name_union(&entry->fs_type);
+			tomoyo_put_number_union(&entry->flags);
+		}
+		break;
+	case TOMOYO_TYPE_ENV_ACL:
+		{
+			struct tomoyo_env_acl *entry =
+				container_of(acl, typeof(*entry), head);
+
+			tomoyo_put_name(entry->env);
+		}
+		break;
+	case TOMOYO_TYPE_INET_ACL:
+		{
+			struct tomoyo_inet_acl *entry =
+				container_of(acl, typeof(*entry), head);
+
+			tomoyo_put_group(entry->address.group);
+			tomoyo_put_number_union(&entry->port);
+		}
+		break;
+	case TOMOYO_TYPE_UNIX_ACL:
+		{
+			struct tomoyo_unix_acl *entry =
+				container_of(acl, typeof(*entry), head);
+
+			tomoyo_put_name_union(&entry->name);
+		}
+		break;
+	case TOMOYO_TYPE_MANUAL_TASK_ACL:
+		{
+			struct tomoyo_task_acl *entry =
+				container_of(acl, typeof(*entry), head);
+			tomoyo_put_name(entry->domainname);
+		}
+		break;
+	}
+}
+
+/**
+ * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_del_domain(struct list_head *element)
+{
+	struct tomoyo_domain_info *domain =
+		container_of(element, typeof(*domain), list);
+	struct tomoyo_acl_info *acl;
+	struct tomoyo_acl_info *tmp;
+	/*
+	 * Since this domain is referenced from neither
+	 * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
+	 * elements without checking for is_deleted flag.
+	 */
+	list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
+		tomoyo_del_acl(&acl->list);
+		tomoyo_memory_free(acl);
+	}
+	tomoyo_put_name(domain->domainname);
+}
+
+/**
+ * tomoyo_del_condition - Delete members in "struct tomoyo_condition".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+void tomoyo_del_condition(struct list_head *element)
+{
+	struct tomoyo_condition *cond = container_of(element, typeof(*cond),
+						     head.list);
+	const u16 condc = cond->condc;
+	const u16 numbers_count = cond->numbers_count;
+	const u16 names_count = cond->names_count;
+	const u16 argc = cond->argc;
+	const u16 envc = cond->envc;
+	unsigned int i;
+	const struct tomoyo_condition_element *condp
+		= (const struct tomoyo_condition_element *) (cond + 1);
+	struct tomoyo_number_union *numbers_p
+		= (struct tomoyo_number_union *) (condp + condc);
+	struct tomoyo_name_union *names_p
+		= (struct tomoyo_name_union *) (numbers_p + numbers_count);
+	const struct tomoyo_argv *argv
+		= (const struct tomoyo_argv *) (names_p + names_count);
+	const struct tomoyo_envp *envp
+		= (const struct tomoyo_envp *) (argv + argc);
+	for (i = 0; i < numbers_count; i++)
+		tomoyo_put_number_union(numbers_p++);
+	for (i = 0; i < names_count; i++)
+		tomoyo_put_name_union(names_p++);
+	for (i = 0; i < argc; argv++, i++)
+		tomoyo_put_name(argv->value);
+	for (i = 0; i < envc; envp++, i++) {
+		tomoyo_put_name(envp->name);
+		tomoyo_put_name(envp->value);
+	}
+}
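+
+/*
+ * The pointer arithmetic above mirrors the single-block layout that the
+ * condition allocator creates:
+ *
+ *   struct tomoyo_condition          (header)
+ *   struct tomoyo_condition_element  x condc
+ *   struct tomoyo_number_union       x numbers_count
+ *   struct tomoyo_name_union         x names_count
+ *   struct tomoyo_argv               x argc
+ *   struct tomoyo_envp               x envc
+ *
+ * Each region starts where the previous one ends, so the put_*() loops
+ * release exactly the references taken when the entry was built.
+ */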
+
+/**
+ * tomoyo_del_name - Delete members in "struct tomoyo_name".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_name(struct list_head *element)
+{
+	/* Nothing to do. */
+}
+
+/**
+ * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_path_group(struct list_head *element)
+{
+	struct tomoyo_path_group *member =
+		container_of(element, typeof(*member), head.list);
+	tomoyo_put_name(member->member_name);
+}
+
+/**
+ * tomoyo_del_group - Delete "struct tomoyo_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_group(struct list_head *element)
+{
+	struct tomoyo_group *group =
+		container_of(element, typeof(*group), head.list);
+	tomoyo_put_name(group->group_name);
+}
+
+/**
+ * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_address_group(struct list_head *element)
+{
+	/* Nothing to do. */
+}
+
+/**
+ * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_number_group(struct list_head *element)
+{
+	/* Nothing to do. */
+}
+
+/**
+ * tomoyo_try_to_gc - Try to kfree() an entry.
+ *
+ * @type:    One of the values in "enum tomoyo_policy_id".
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
+			     struct list_head *element)
+{
+	/*
+	 * __list_del_entry() guarantees that the list element is no longer
+	 * reachable from the list which the element was originally on (e.g.
+	 * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the
+	 * list element is no longer referenced by syscall users.
+	 */
+	__list_del_entry(element);
+	mutex_unlock(&tomoyo_policy_lock);
+	synchronize_srcu(&tomoyo_ss);
+	/*
+	 * However, there are two users which may still be using the list
+	 * element. We need to defer until both users forget this element.
+	 *
+	 * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
+	 * and "struct tomoyo_io_buffer"->w.domain forget this element.
+	 */
+	if (tomoyo_struct_used_by_io_buffer(element))
+		goto reinject;
+	switch (type) {
+	case TOMOYO_ID_TRANSITION_CONTROL:
+		tomoyo_del_transition_control(element);
+		break;
+	case TOMOYO_ID_MANAGER:
+		tomoyo_del_manager(element);
+		break;
+	case TOMOYO_ID_AGGREGATOR:
+		tomoyo_del_aggregator(element);
+		break;
+	case TOMOYO_ID_GROUP:
+		tomoyo_del_group(element);
+		break;
+	case TOMOYO_ID_PATH_GROUP:
+		tomoyo_del_path_group(element);
+		break;
+	case TOMOYO_ID_ADDRESS_GROUP:
+		tomoyo_del_address_group(element);
+		break;
+	case TOMOYO_ID_NUMBER_GROUP:
+		tomoyo_del_number_group(element);
+		break;
+	case TOMOYO_ID_CONDITION:
+		tomoyo_del_condition(element);
+		break;
+	case TOMOYO_ID_NAME:
+		/*
+		 * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
+		 * forget this element.
+		 */
+		if (tomoyo_name_used_by_io_buffer
+		    (container_of(element, typeof(struct tomoyo_name),
+				  head.list)->entry.name))
+			goto reinject;
+		tomoyo_del_name(element);
+		break;
+	case TOMOYO_ID_ACL:
+		tomoyo_del_acl(element);
+		break;
+	case TOMOYO_ID_DOMAIN:
+		/*
+		 * Don't kfree() until all "struct cred"->security forget this
+		 * element.
+		 */
+		if (atomic_read(&container_of
+				(element, typeof(struct tomoyo_domain_info),
+				 list)->users))
+			goto reinject;
+		break;
+	case TOMOYO_MAX_POLICY:
+		break;
+	}
+	mutex_lock(&tomoyo_policy_lock);
+	if (type == TOMOYO_ID_DOMAIN)
+		tomoyo_del_domain(element);
+	tomoyo_memory_free(element);
+	return;
+reinject:
+	/*
+	 * We can safely reinject this element here because
+	 * (1) appending and removing list elements are protected by
+	 *     tomoyo_policy_lock mutex, and
+	 * (2) only this function removes list elements, and it runs
+	 *     exclusively under tomoyo_gc_mutex mutex.
+	 */
+	mutex_lock(&tomoyo_policy_lock);
+	list_add_rcu(element, element->prev);
+}
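+
+/*
+ * The reinject above relies on __list_del_entry() semantics: it unlinks
+ * the element by updating its neighbours but leaves element->prev and
+ * element->next intact, so list_add_rcu(element, element->prev) puts
+ * the element back at its original position in the list.
+ */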
+
+/**
+ * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head".
+ *
+ * @id:          One of the values in "enum tomoyo_policy_id".
+ * @member_list: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_member(const enum tomoyo_policy_id id,
+				  struct list_head *member_list)
+{
+	struct tomoyo_acl_head *member;
+	struct tomoyo_acl_head *tmp;
+	list_for_each_entry_safe(member, tmp, member_list, list) {
+		if (!member->is_deleted)
+			continue;
+		member->is_deleted = TOMOYO_GC_IN_PROGRESS;
+		tomoyo_try_to_gc(id, &member->list);
+	}
+}
+
+/**
+ * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info".
+ *
+ * @list: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_acl(struct list_head *list)
+{
+	struct tomoyo_acl_info *acl;
+	struct tomoyo_acl_info *tmp;
+	list_for_each_entry_safe(acl, tmp, list, list) {
+		if (!acl->is_deleted)
+			continue;
+		acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
+		tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
+	}
+}
+
+/**
+ * tomoyo_collect_entry - Try to kfree() deleted elements.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_entry(void)
+{
+	int i;
+	enum tomoyo_policy_id id;
+	struct tomoyo_policy_namespace *ns;
+	mutex_lock(&tomoyo_policy_lock);
+	{
+		struct tomoyo_domain_info *domain;
+		struct tomoyo_domain_info *tmp;
+		list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
+					 list) {
+			tomoyo_collect_acl(&domain->acl_info_list);
+			if (!domain->is_deleted || atomic_read(&domain->users))
+				continue;
+			tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
+		}
+	}
+	list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+		for (id = 0; id < TOMOYO_MAX_POLICY; id++)
+			tomoyo_collect_member(id, &ns->policy_list[id]);
+		for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
+			tomoyo_collect_acl(&ns->acl_group[i]);
+	}
+	{
+		struct tomoyo_shared_acl_head *ptr;
+		struct tomoyo_shared_acl_head *tmp;
+		list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
+					 list) {
+			if (atomic_read(&ptr->users) > 0)
+				continue;
+			atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+			tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
+		}
+	}
+	list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+		for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
+			struct list_head *list = &ns->group_list[i];
+			struct tomoyo_group *group;
+			struct tomoyo_group *tmp;
+			switch (i) {
+			case 0:
+				id = TOMOYO_ID_PATH_GROUP;
+				break;
+			case 1:
+				id = TOMOYO_ID_NUMBER_GROUP;
+				break;
+			default:
+				id = TOMOYO_ID_ADDRESS_GROUP;
+				break;
+			}
+			list_for_each_entry_safe(group, tmp, list, head.list) {
+				tomoyo_collect_member(id, &group->member_list);
+				if (!list_empty(&group->member_list) ||
+				    atomic_read(&group->head.users) > 0)
+					continue;
+				atomic_set(&group->head.users,
+					   TOMOYO_GC_IN_PROGRESS);
+				tomoyo_try_to_gc(TOMOYO_ID_GROUP,
+						 &group->head.list);
+			}
+		}
+	}
+	for (i = 0; i < TOMOYO_MAX_HASH; i++) {
+		struct list_head *list = &tomoyo_name_list[i];
+		struct tomoyo_shared_acl_head *ptr;
+		struct tomoyo_shared_acl_head *tmp;
+		list_for_each_entry_safe(ptr, tmp, list, list) {
+			if (atomic_read(&ptr->users) > 0)
+				continue;
+			atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+			tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
+		}
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+}
+
+/**
+ * tomoyo_gc_thread - Garbage collector thread function.
+ *
+ * @unused: Unused.
+ *
+ * Returns 0.
+ */
+static int tomoyo_gc_thread(void *unused)
+{
+	/* Garbage collector thread is exclusive. */
+	static DEFINE_MUTEX(tomoyo_gc_mutex);
+	if (!mutex_trylock(&tomoyo_gc_mutex))
+		goto out;
+	tomoyo_collect_entry();
+	{
+		struct tomoyo_io_buffer *head;
+		struct tomoyo_io_buffer *tmp;
+
+		spin_lock(&tomoyo_io_buffer_list_lock);
+		list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list,
+					 list) {
+			if (head->users)
+				continue;
+			list_del(&head->list);
+			kfree(head->read_buf);
+			kfree(head->write_buf);
+			kfree(head);
+		}
+		spin_unlock(&tomoyo_io_buffer_list_lock);
+	}
+	mutex_unlock(&tomoyo_gc_mutex);
+out:
+	/* This acts as do_exit(0). */
+	return 0;
+}
+
+/**
+ * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users.
+ *
+ * @head:        Pointer to "struct tomoyo_io_buffer".
+ * @is_register: True if register, false if unregister.
+ *
+ * Returns nothing.
+ */
+void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register)
+{
+	bool is_write = false;
+
+	spin_lock(&tomoyo_io_buffer_list_lock);
+	if (is_register) {
+		head->users = 1;
+		list_add(&head->list, &tomoyo_io_buffer_list);
+	} else {
+		is_write = head->write_buf != NULL;
+		if (!--head->users) {
+			list_del(&head->list);
+			kfree(head->read_buf);
+			kfree(head->write_buf);
+			kfree(head);
+		}
+	}
+	spin_unlock(&tomoyo_io_buffer_list_lock);
+	if (is_write) {
+		struct task_struct *task = kthread_create(tomoyo_gc_thread,
+							  NULL,
+							  "GC for TOMOYO");
+		if (!IS_ERR(task))
+			wake_up_process(task);
+	}
+}
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
new file mode 100644
index 0000000..5009253
--- /dev/null
+++ b/security/tomoyo/group.c
@@ -0,0 +1,198 @@
+/*
+ * security/tomoyo/group.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include "common.h"
+
+/**
+ * tomoyo_same_path_group - Check for duplicated "struct tomoyo_path_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_path_group(const struct tomoyo_acl_head *a,
+				   const struct tomoyo_acl_head *b)
+{
+	return container_of(a, struct tomoyo_path_group, head)->member_name ==
+		container_of(b, struct tomoyo_path_group, head)->member_name;
+}
+
+/**
+ * tomoyo_same_number_group - Check for duplicated "struct tomoyo_number_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a,
+				     const struct tomoyo_acl_head *b)
+{
+	return !memcmp(&container_of(a, struct tomoyo_number_group, head)
+		       ->number,
+		       &container_of(b, struct tomoyo_number_group, head)
+		       ->number,
+		       sizeof(container_of(a, struct tomoyo_number_group, head)
+			      ->number));
+}
+
+/**
+ * tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a,
+				      const struct tomoyo_acl_head *b)
+{
+	const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1),
+							     head);
+	const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2),
+							     head);
+
+	return tomoyo_same_ipaddr_union(&p1->address, &p2->address);
+}
+
+/**
+ * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @type:  Type of this group.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
+{
+	struct tomoyo_group *group = tomoyo_get_group(param, type);
+	int error = -EINVAL;
+	if (!group)
+		return -ENOMEM;
+	param->list = &group->member_list;
+	if (type == TOMOYO_PATH_GROUP) {
+		struct tomoyo_path_group e = { };
+		e.member_name = tomoyo_get_name(tomoyo_read_token(param));
+		if (!e.member_name) {
+			error = -ENOMEM;
+			goto out;
+		}
+		error = tomoyo_update_policy(&e.head, sizeof(e), param,
+					  tomoyo_same_path_group);
+		tomoyo_put_name(e.member_name);
+	} else if (type == TOMOYO_NUMBER_GROUP) {
+		struct tomoyo_number_group e = { };
+		if (param->data[0] == '@' ||
+		    !tomoyo_parse_number_union(param, &e.number))
+			goto out;
+		error = tomoyo_update_policy(&e.head, sizeof(e), param,
+					  tomoyo_same_number_group);
+		/*
+		 * tomoyo_put_number_union() is not needed because
+		 * param->data[0] != '@'.
+		 */
+	} else {
+		struct tomoyo_address_group e = { };
+
+		if (param->data[0] == '@' ||
+		    !tomoyo_parse_ipaddr_union(param, &e.address))
+			goto out;
+		error = tomoyo_update_policy(&e.head, sizeof(e), param,
+					     tomoyo_same_address_group);
+	}
+out:
+	tomoyo_put_group(group);
+	return error;
+}
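+
+/*
+ * Example policy lines handled by tomoyo_write_group() (illustrative):
+ *
+ *   path_group    WEB-CONTENTS /var/www/\*
+ *   number_group  LOCAL-PORTS  1024-65535
+ *   address_group LOCAL-NET    10.0.0.0-10.255.255.255
+ *
+ * A member starting with '@' is rejected for number/address groups so
+ * that a group cannot reference another group.
+ */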
+
+/**
+ * tomoyo_path_matches_group - Check whether the given pathname matches members of the given pathname group.
+ *
+ * @pathname: Pathname to check.
+ * @group:    Pointer to "struct tomoyo_path_group".
+ *
+ * Returns matched member's pathname if @pathname matches pathnames in @group,
+ * NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+const struct tomoyo_path_info *
+tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
+			  const struct tomoyo_group *group)
+{
+	struct tomoyo_path_group *member;
+	list_for_each_entry_rcu(member, &group->member_list, head.list) {
+		if (member->head.is_deleted)
+			continue;
+		if (!tomoyo_path_matches_pattern(pathname, member->member_name))
+			continue;
+		return member->member_name;
+	}
+	return NULL;
+}
+
+/**
+ * tomoyo_number_matches_group - Check whether the given number matches members of the given number group.
+ *
+ * @min:   Min number.
+ * @max:   Max number.
+ * @group: Pointer to "struct tomoyo_number_group".
+ *
+ * Returns true if the range from @min to @max overlaps at least one
+ * member of @group, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_number_matches_group(const unsigned long min,
+				 const unsigned long max,
+				 const struct tomoyo_group *group)
+{
+	struct tomoyo_number_group *member;
+	bool matched = false;
+	list_for_each_entry_rcu(member, &group->member_list, head.list) {
+		if (member->head.is_deleted)
+			continue;
+		if (min > member->number.values[1] ||
+		    max < member->number.values[0])
+			continue;
+		matched = true;
+		break;
+	}
+	return matched;
+}
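+
+/*
+ * Overlap illustration: a member covering values[0]..values[1] is
+ * skipped only when the requested range lies entirely outside it.
+ * Against a member 1024-65535, min=80 max=80 does not match, while
+ * min=1000 max=1100 does (partial overlap is sufficient).
+ */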
+
+/**
+ * tomoyo_address_matches_group - Check whether the given address matches members of the given address group.
+ *
+ * @is_ipv6: True if @address is an IPv6 address.
+ * @address: An IPv4 or IPv6 address.
+ * @group:   Pointer to "struct tomoyo_address_group".
+ *
+ * Returns true if @address matches an address in @group, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+				  const struct tomoyo_group *group)
+{
+	struct tomoyo_address_group *member;
+	bool matched = false;
+	const u8 size = is_ipv6 ? 16 : 4;
+
+	list_for_each_entry_rcu(member, &group->member_list, head.list) {
+		if (member->head.is_deleted)
+			continue;
+		if (member->address.is_ipv6 != is_ipv6)
+			continue;
+		if (memcmp(&member->address.ip[0], address, size) > 0 ||
+		    memcmp(address, &member->address.ip[1], size) > 0)
+			continue;
+		matched = true;
+		break;
+	}
+	return matched;
+}
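+
+/*
+ * The two memcmp() calls above implement an inclusive range check on
+ * addresses kept in network byte order: byte-wise comparison of
+ * big-endian values orders them numerically, so a member matches
+ * exactly when ip[0] <= @address <= ip[1].
+ */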
diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c
new file mode 100644
index 0000000..078fac0
--- /dev/null
+++ b/security/tomoyo/load_policy.c
@@ -0,0 +1,109 @@
+/*
+ * security/tomoyo/load_policy.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+
+/*
+ * Path to the policy loader. (default = CONFIG_SECURITY_TOMOYO_POLICY_LOADER)
+ */
+static const char *tomoyo_loader;
+
+/**
+ * tomoyo_loader_setup - Set policy loader.
+ *
+ * @str: Program to use as a policy loader (e.g. /sbin/tomoyo-init).
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_loader_setup(char *str)
+{
+	tomoyo_loader = str;
+	return 0;
+}
+
+__setup("TOMOYO_loader=", tomoyo_loader_setup);
+
+/**
+ * tomoyo_policy_loader_exists - Check whether /sbin/tomoyo-init exists.
+ *
+ * Returns true if /sbin/tomoyo-init exists, false otherwise.
+ */
+static bool tomoyo_policy_loader_exists(void)
+{
+	struct path path;
+	if (!tomoyo_loader)
+		tomoyo_loader = CONFIG_SECURITY_TOMOYO_POLICY_LOADER;
+	if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) {
+		printk(KERN_INFO "Not activating Mandatory Access Control "
+		       "as %s does not exist.\n", tomoyo_loader);
+		return false;
+	}
+	path_put(&path);
+	return true;
+}
+
+/*
+ * Path to the trigger. (default = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER)
+ */
+static const char *tomoyo_trigger;
+
+/**
+ * tomoyo_trigger_setup - Set trigger for activation.
+ *
+ * @str: Program to use as an activation trigger (e.g. /sbin/init).
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_trigger_setup(char *str)
+{
+	tomoyo_trigger = str;
+	return 0;
+}
+
+__setup("TOMOYO_trigger=", tomoyo_trigger_setup);
+
+/**
+ * tomoyo_load_policy - Run external policy loader to load policy.
+ *
+ * @filename: The program about to start.
+ *
+ * This function checks whether @filename is /sbin/init, and if so
+ * invokes /sbin/tomoyo-init, waits for /sbin/tomoyo-init to terminate,
+ * and then continues the invocation of /sbin/init.
+ * /sbin/tomoyo-init reads policy files in /etc/tomoyo/ directory and
+ * writes to /sys/kernel/security/tomoyo/ interfaces.
+ *
+ * Returns nothing.
+ */
+void tomoyo_load_policy(const char *filename)
+{
+	static bool done;
+	char *argv[2];
+	char *envp[3];
+
+	if (tomoyo_policy_loaded || done)
+		return;
+	if (!tomoyo_trigger)
+		tomoyo_trigger = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER;
+	if (strcmp(filename, tomoyo_trigger))
+		return;
+	if (!tomoyo_policy_loader_exists())
+		return;
+	done = true;
+	printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
+	       tomoyo_loader);
+	argv[0] = (char *) tomoyo_loader;
+	argv[1] = NULL;
+	envp[0] = "HOME=/";
+	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+	envp[2] = NULL;
+	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+	tomoyo_check_profile();
+}
+
+#endif
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
new file mode 100644
index 0000000..0e99571
--- /dev/null
+++ b/security/tomoyo/memory.c
@@ -0,0 +1,201 @@
+/*
+ * security/tomoyo/memory.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include <linux/hash.h>
+#include <linux/slab.h>
+#include "common.h"
+
+/**
+ * tomoyo_warn_oom - Print an out-of-memory warning message.
+ *
+ * @function: Function's name.
+ */
+void tomoyo_warn_oom(const char *function)
+{
+	/* Reduce error messages. */
+	static pid_t tomoyo_last_pid;
+	const pid_t pid = current->pid;
+	if (tomoyo_last_pid != pid) {
+		printk(KERN_WARNING "ERROR: Out of memory at %s.\n",
+		       function);
+		tomoyo_last_pid = pid;
+	}
+	if (!tomoyo_policy_loaded)
+		panic("MAC Initialization failed.\n");
+}
+
+/* Memory currently used by policy/audit log/query. */
+unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
+/* Memory quota for "policy"/"audit log"/"query". */
+unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
+
+/**
+ * tomoyo_memory_ok - Check memory quota.
+ *
+ * @ptr: Pointer to allocated memory.
+ *
+ * Returns true if @ptr is not NULL and quota not exceeded, false otherwise.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+bool tomoyo_memory_ok(void *ptr)
+{
+	if (ptr) {
+		const size_t s = ksize(ptr);
+		tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s;
+		if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
+		    tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
+		    tomoyo_memory_quota[TOMOYO_MEMORY_POLICY])
+			return true;
+		tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
+	}
+	tomoyo_warn_oom(__func__);
+	return false;
+}
+
+/**
+ * tomoyo_commit_ok - Check memory quota.
+ *
+ * @data:   Data to copy from.
+ * @size:   Size in byte.
+ *
+ * Returns pointer to allocated memory on success, NULL otherwise.
+ * @data is zero-cleared on success.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+void *tomoyo_commit_ok(void *data, const unsigned int size)
+{
+	void *ptr = kzalloc(size, GFP_NOFS);
+	if (tomoyo_memory_ok(ptr)) {
+		memmove(ptr, data, size);
+		memset(data, 0, size);
+		return ptr;
+	}
+	kfree(ptr);
+	return NULL;
+}
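+
+/*
+ * Typical usage pattern (a sketch of what the update functions in this
+ * module do): build the entry on the stack, let tomoyo_commit_ok() move
+ * it into quota-accounted heap memory, then drop the (now zeroed) stack
+ * copy's references unconditionally:
+ *
+ *   struct tomoyo_group e = { };
+ *   e.group_name = tomoyo_get_name(name);    <- takes a reference
+ *   entry = tomoyo_commit_ok(&e, sizeof(e));
+ *   ...
+ *   tomoyo_put_name(e.group_name);   <- no-op when e was zero-cleared
+ */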
+
+/**
+ * tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group".
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @idx:   Index number.
+ *
+ * Returns pointer to "struct tomoyo_group" on success, NULL otherwise.
+ */
+struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
+				      const u8 idx)
+{
+	struct tomoyo_group e = { };
+	struct tomoyo_group *group = NULL;
+	struct list_head *list;
+	const char *group_name = tomoyo_read_token(param);
+	bool found = false;
+	if (!tomoyo_correct_word(group_name) || idx >= TOMOYO_MAX_GROUP)
+		return NULL;
+	e.group_name = tomoyo_get_name(group_name);
+	if (!e.group_name)
+		return NULL;
+	if (mutex_lock_interruptible(&tomoyo_policy_lock))
+		goto out;
+	list = &param->ns->group_list[idx];
+	list_for_each_entry(group, list, head.list) {
+		if (e.group_name != group->group_name ||
+		    atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS)
+			continue;
+		atomic_inc(&group->head.users);
+		found = true;
+		break;
+	}
+	if (!found) {
+		struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e));
+		if (entry) {
+			INIT_LIST_HEAD(&entry->member_list);
+			atomic_set(&entry->head.users, 1);
+			list_add_tail_rcu(&entry->head.list, list);
+			group = entry;
+			found = true;
+		}
+	}
+	mutex_unlock(&tomoyo_policy_lock);
+out:
+	tomoyo_put_name(e.group_name);
+	return found ? group : NULL;
+}
+
+/*
+ * tomoyo_name_list is used for holding string data used by TOMOYO.
+ * Since the same string data is likely to be used multiple times (e.g.
+ * "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
+ * "const struct tomoyo_path_info *".
+ */
+struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+
+/**
+ * tomoyo_get_name - Allocate permanent memory for string data.
+ *
+ * @name: The string to store into the permanent memory.
+ *
+ * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_name(const char *name)
+{
+	struct tomoyo_name *ptr;
+	unsigned int hash;
+	int len;
+	struct list_head *head;
+
+	if (!name)
+		return NULL;
+	len = strlen(name) + 1;
+	hash = full_name_hash((const unsigned char *) name, len - 1);
+	head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
+	if (mutex_lock_interruptible(&tomoyo_policy_lock))
+		return NULL;
+	list_for_each_entry(ptr, head, head.list) {
+		if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) ||
+		    atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
+			continue;
+		atomic_inc(&ptr->head.users);
+		goto out;
+	}
+	ptr = kzalloc(sizeof(*ptr) + len, GFP_NOFS);
+	if (tomoyo_memory_ok(ptr)) {
+		ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
+		memmove((char *) ptr->entry.name, name, len);
+		atomic_set(&ptr->head.users, 1);
+		tomoyo_fill_path_info(&ptr->entry);
+		list_add_tail(&ptr->head.list, head);
+	} else {
+		kfree(ptr);
+		ptr = NULL;
+	}
+out:
+	mutex_unlock(&tomoyo_policy_lock);
+	return ptr ? &ptr->entry : NULL;
+}
+
+/* Initial namespace. */
+struct tomoyo_policy_namespace tomoyo_kernel_namespace;
+
+/**
+ * tomoyo_mm_init - Initialize mm related code.
+ */
+void __init tomoyo_mm_init(void)
+{
+	int idx;
+	for (idx = 0; idx < TOMOYO_MAX_HASH; idx++)
+		INIT_LIST_HEAD(&tomoyo_name_list[idx]);
+	tomoyo_kernel_namespace.name = "<kernel>";
+	tomoyo_init_policy_namespace(&tomoyo_kernel_namespace);
+	tomoyo_kernel_domain.ns = &tomoyo_kernel_namespace;
+	INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
+	tomoyo_kernel_domain.domainname = tomoyo_get_name("<kernel>");
+	list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
+}
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
new file mode 100644
index 0000000..390c646
--- /dev/null
+++ b/security/tomoyo/mount.c
@@ -0,0 +1,236 @@
+/*
+ * security/tomoyo/mount.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include "common.h"
+
+/* String table for special mount operations. */
+static const char * const tomoyo_mounts[TOMOYO_MAX_SPECIAL_MOUNT] = {
+	[TOMOYO_MOUNT_BIND]            = "--bind",
+	[TOMOYO_MOUNT_MOVE]            = "--move",
+	[TOMOYO_MOUNT_REMOUNT]         = "--remount",
+	[TOMOYO_MOUNT_MAKE_UNBINDABLE] = "--make-unbindable",
+	[TOMOYO_MOUNT_MAKE_PRIVATE]    = "--make-private",
+	[TOMOYO_MOUNT_MAKE_SLAVE]      = "--make-slave",
+	[TOMOYO_MOUNT_MAKE_SHARED]     = "--make-shared",
+};
+
+/**
+ * tomoyo_audit_mount_log - Audit mount log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_mount_log(struct tomoyo_request_info *r)
+{
+	return tomoyo_supervisor(r, "file mount %s %s %s 0x%lX\n",
+				 r->param.mount.dev->name,
+				 r->param.mount.dir->name,
+				 r->param.mount.type->name,
+				 r->param.mount.flags);
+}
+
+/**
+ * tomoyo_check_mount_acl - Check permission for mount operation.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
+				   const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_mount_acl *acl =
+		container_of(ptr, typeof(*acl), head);
+	return tomoyo_compare_number_union(r->param.mount.flags,
+					   &acl->flags) &&
+		tomoyo_compare_name_union(r->param.mount.type,
+					  &acl->fs_type) &&
+		tomoyo_compare_name_union(r->param.mount.dir,
+					  &acl->dir_name) &&
+		(!r->param.mount.need_dev ||
+		 tomoyo_compare_name_union(r->param.mount.dev,
+					   &acl->dev_name));
+}
+
+/**
+ * tomoyo_mount_acl - Check permission for mount() operation.
+ *
+ * @r:        Pointer to "struct tomoyo_request_info".
+ * @dev_name: Name of device file. May be NULL.
+ * @dir:      Pointer to "struct path".
+ * @type:     Name of filesystem type.
+ * @flags:    Mount options.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_mount_acl(struct tomoyo_request_info *r,
+			    const char *dev_name,
+			    struct path *dir, const char *type,
+			    unsigned long flags)
+{
+	struct tomoyo_obj_info obj = { };
+	struct path path;
+	struct file_system_type *fstype = NULL;
+	const char *requested_type = NULL;
+	const char *requested_dir_name = NULL;
+	const char *requested_dev_name = NULL;
+	struct tomoyo_path_info rtype;
+	struct tomoyo_path_info rdev;
+	struct tomoyo_path_info rdir;
+	int need_dev = 0;
+	int error = -ENOMEM;
+	r->obj = &obj;
+
+	/* Get fstype. */
+	requested_type = tomoyo_encode(type);
+	if (!requested_type)
+		goto out;
+	rtype.name = requested_type;
+	tomoyo_fill_path_info(&rtype);
+
+	/* Get mount point. */
+	obj.path2 = *dir;
+	requested_dir_name = tomoyo_realpath_from_path(dir);
+	if (!requested_dir_name) {
+		error = -ENOMEM;
+		goto out;
+	}
+	rdir.name = requested_dir_name;
+	tomoyo_fill_path_info(&rdir);
+
+	/* Compare fs name. */
+	if (type == tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]) {
+		/* dev_name is ignored. */
+	} else if (type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE] ||
+		   type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE] ||
+		   type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE] ||
+		   type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]) {
+		/* dev_name is ignored. */
+	} else if (type == tomoyo_mounts[TOMOYO_MOUNT_BIND] ||
+		   type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
+		need_dev = -1; /* dev_name is a directory */
+	} else {
+		fstype = get_fs_type(type);
+		if (!fstype) {
+			error = -ENODEV;
+			goto out;
+		}
+		if (fstype->fs_flags & FS_REQUIRES_DEV)
+			/* dev_name is a block device file. */
+			need_dev = 1;
+	}
+	if (need_dev) {
+		/* Get mount point or device file. */
+		if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
+			error = -ENOENT;
+			goto out;
+		}
+		obj.path1 = path;
+		requested_dev_name = tomoyo_realpath_from_path(&path);
+		if (!requested_dev_name) {
+			error = -ENOENT;
+			goto out;
+		}
+	} else {
+		/* Map dev_name to "<NULL>" if no dev_name given. */
+		if (!dev_name)
+			dev_name = "<NULL>";
+		requested_dev_name = tomoyo_encode(dev_name);
+		if (!requested_dev_name) {
+			error = -ENOMEM;
+			goto out;
+		}
+	}
+	rdev.name = requested_dev_name;
+	tomoyo_fill_path_info(&rdev);
+	r->param_type = TOMOYO_TYPE_MOUNT_ACL;
+	r->param.mount.need_dev = need_dev;
+	r->param.mount.dev = &rdev;
+	r->param.mount.dir = &rdir;
+	r->param.mount.type = &rtype;
+	r->param.mount.flags = flags;
+	do {
+		tomoyo_check_acl(r, tomoyo_check_mount_acl);
+		error = tomoyo_audit_mount_log(r);
+	} while (error == TOMOYO_RETRY_REQUEST);
+ out:
+	kfree(requested_dev_name);
+	kfree(requested_dir_name);
+	if (fstype)
+		put_filesystem(fstype);
+	kfree(requested_type);
+	/* Drop refcount obtained by kern_path(). */
+	if (obj.path1.dentry)
+		path_put(&obj.path1);
+	return error;
+}
+
+/**
+ * tomoyo_mount_permission - Check permission for mount() operation.
+ *
+ * @dev_name:  Name of device file. May be NULL.
+ * @path:      Pointer to "struct path".
+ * @type:      Name of filesystem type. May be NULL.
+ * @flags:     Mount options.
+ * @data_page: Optional data. May be NULL.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_mount_permission(const char *dev_name, struct path *path,
+			    const char *type, unsigned long flags,
+			    void *data_page)
+{
+	struct tomoyo_request_info r;
+	int error;
+	int idx;
+
+	if (tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_MOUNT)
+	    == TOMOYO_CONFIG_DISABLED)
+		return 0;
+	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+		flags &= ~MS_MGC_MSK;
+	if (flags & MS_REMOUNT) {
+		type = tomoyo_mounts[TOMOYO_MOUNT_REMOUNT];
+		flags &= ~MS_REMOUNT;
+	} else if (flags & MS_BIND) {
+		type = tomoyo_mounts[TOMOYO_MOUNT_BIND];
+		flags &= ~MS_BIND;
+	} else if (flags & MS_SHARED) {
+		if (flags & (MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+			return -EINVAL;
+		type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED];
+		flags &= ~MS_SHARED;
+	} else if (flags & MS_PRIVATE) {
+		if (flags & (MS_SHARED | MS_SLAVE | MS_UNBINDABLE))
+			return -EINVAL;
+		type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE];
+		flags &= ~MS_PRIVATE;
+	} else if (flags & MS_SLAVE) {
+		if (flags & (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE))
+			return -EINVAL;
+		type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE];
+		flags &= ~MS_SLAVE;
+	} else if (flags & MS_UNBINDABLE) {
+		if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE))
+			return -EINVAL;
+		type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE];
+		flags &= ~MS_UNBINDABLE;
+	} else if (flags & MS_MOVE) {
+		type = tomoyo_mounts[TOMOYO_MOUNT_MOVE];
+		flags &= ~MS_MOVE;
+	}
+	if (!type)
+		type = "<NULL>";
+	idx = tomoyo_read_lock();
+	error = tomoyo_mount_acl(&r, dev_name, path, type, flags);
+	tomoyo_read_unlock(idx);
+	return error;
+}
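+
+/*
+ * Flag mapping illustration: a request such as
+ *
+ *   mount("/home", "/mnt", NULL, MS_BIND | MS_RDONLY, NULL)
+ *
+ * reaches tomoyo_mount_acl() with type "--bind" (paths hypothetical),
+ * and is audited as "file mount /home/ /mnt/ --bind 0x1" since MS_BIND
+ * itself is stripped from the flags before the check.
+ */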
diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
new file mode 100644
index 0000000..9752771
--- /dev/null
+++ b/security/tomoyo/network.c
@@ -0,0 +1,771 @@
+/*
+ * security/tomoyo/network.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* Structure for holding inet domain socket's address. */
+struct tomoyo_inet_addr_info {
+	__be16 port;           /* In network byte order. */
+	const __be32 *address; /* In network byte order. */
+	bool is_ipv6;
+};
+
+/* Structure for holding unix domain socket's address. */
+struct tomoyo_unix_addr_info {
+	u8 *addr; /* This may not be a '\0'-terminated string. */
+	unsigned int addr_len;
+};
+
+/* Structure for holding socket address. */
+struct tomoyo_addr_info {
+	u8 protocol;
+	u8 operation;
+	struct tomoyo_inet_addr_info inet;
+	struct tomoyo_unix_addr_info unix0;
+};
+
+/* String table for socket's protocols. */
+const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = {
+	[SOCK_STREAM]    = "stream",
+	[SOCK_DGRAM]     = "dgram",
+	[SOCK_RAW]       = "raw",
+	[SOCK_SEQPACKET] = "seqpacket",
+	[0] = " ", /* Dummy for avoiding NULL pointer dereference. */
+	[4] = " ", /* Dummy for avoiding NULL pointer dereference. */
+};
+
+/**
+ * tomoyo_parse_ipaddr_union - Parse an IP address.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr:   Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+			       struct tomoyo_ipaddr_union *ptr)
+{
+	u8 * const min = ptr->ip[0].in6_u.u6_addr8;
+	u8 * const max = ptr->ip[1].in6_u.u6_addr8;
+	char *address = tomoyo_read_token(param);
+	const char *end;
+
+	if (!strchr(address, ':') &&
+	    in4_pton(address, -1, min, '-', &end) > 0) {
+		ptr->is_ipv6 = false;
+		if (!*end)
+			ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0];
+		else if (*end++ != '-' ||
+			 in4_pton(end, -1, max, '\0', &end) <= 0 || *end)
+			return false;
+		return true;
+	}
+	if (in6_pton(address, -1, min, '-', &end) > 0) {
+		ptr->is_ipv6 = true;
+		if (!*end)
+			memmove(max, min, sizeof(u16) * 8);
+		else if (*end++ != '-' ||
+			 in6_pton(end, -1, max, '\0', &end) <= 0 || *end)
+			return false;
+		return true;
+	}
+	return false;
+}
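+
+/*
+ * Accepted address forms (illustrative):
+ *
+ *   10.0.0.1               -> IPv4, min == max
+ *   10.0.0.1-10.0.0.255    -> IPv4 range
+ *   ::1                    -> IPv6, min == max
+ *   fe80::-fe80::ffff      -> IPv6 range
+ *
+ * A single address is normalized to a degenerate range by copying the
+ * minimum into the maximum slot, so later comparisons only need to
+ * handle ranges.
+ */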
+
+/**
+ * tomoyo_print_ipv4 - Print an IPv4 address.
+ *
+ * @buffer:     Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip:     Pointer to __be32.
+ * @max_ip:     Pointer to __be32.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len,
+			      const __be32 *min_ip, const __be32 *max_ip)
+{
+	snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip,
+		 *min_ip == *max_ip ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ipv6 - Print an IPv6 address.
+ *
+ * @buffer:     Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip:     Pointer to "struct in6_addr".
+ * @max_ip:     Pointer to "struct in6_addr".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len,
+			      const struct in6_addr *min_ip,
+			      const struct in6_addr *max_ip)
+{
+	snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip,
+		 !memcmp(min_ip, max_ip, 16) ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ip - Print an IP address.
+ *
+ * @buf:  Buffer to write to.
+ * @size: Size of @buf.
+ * @ptr:  Pointer to "struct ipaddr_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ip(char *buf, const unsigned int size,
+		     const struct tomoyo_ipaddr_union *ptr)
+{
+	if (ptr->is_ipv6)
+		tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]);
+	else
+		tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0],
+				  &ptr->ip[1].s6_addr32[0]);
+}
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for inet domain socket.
+ */
+static const u8 tomoyo_inet2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+	[SOCK_STREAM] = {
+		[TOMOYO_NETWORK_BIND]    = TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+		[TOMOYO_NETWORK_LISTEN]  =
+		TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+		[TOMOYO_NETWORK_CONNECT] =
+		TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+	},
+	[SOCK_DGRAM] = {
+		[TOMOYO_NETWORK_BIND]    = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+		[TOMOYO_NETWORK_SEND]    = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+	},
+	[SOCK_RAW]    = {
+		[TOMOYO_NETWORK_BIND]    = TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+		[TOMOYO_NETWORK_SEND]    = TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+	},
+};
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for unix domain socket.
+ */
+static const u8 tomoyo_unix2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+	[SOCK_STREAM] = {
+		[TOMOYO_NETWORK_BIND]    = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+		[TOMOYO_NETWORK_LISTEN]  =
+		TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+		[TOMOYO_NETWORK_CONNECT] =
+		TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+	},
+	[SOCK_DGRAM] = {
+		[TOMOYO_NETWORK_BIND]    = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+		[TOMOYO_NETWORK_SEND]    = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+	},
+	[SOCK_SEQPACKET] = {
+		[TOMOYO_NETWORK_BIND]    =
+		TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+		[TOMOYO_NETWORK_LISTEN]  =
+		TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+		[TOMOYO_NETWORK_CONNECT] =
+		TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+	},
+};
+
+/**
+ * tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a,
+				 const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head);
+	const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head);
+
+	return p1->protocol == p2->protocol &&
+		tomoyo_same_ipaddr_union(&p1->address, &p2->address) &&
+		tomoyo_same_number_union(&p1->port, &p2->port);
+}
+
+/**
+ * tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a,
+				 const struct tomoyo_acl_info *b)
+{
+	const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head);
+	const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head);
+
+	return p1->protocol == p2->protocol &&
+		tomoyo_same_name_union(&p1->name, &p2->name);
+}
+
+/**
+ * tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a:         Pointer to "struct tomoyo_acl_info".
+ * @b:         Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a,
+				  struct tomoyo_acl_info *b,
+				  const bool is_delete)
+{
+	u8 * const a_perm =
+		&container_of(a, struct tomoyo_inet_acl, head)->perm;
+	u8 perm = *a_perm;
+	const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm;
+
+	if (is_delete)
+		perm &= ~b_perm;
+	else
+		perm |= b_perm;
+	*a_perm = perm;
+	return !perm;
+}
+
+/**
+ * tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a:         Pointer to "struct tomoyo_acl_info".
+ * @b:         Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a,
+				  struct tomoyo_acl_info *b,
+				  const bool is_delete)
+{
+	u8 * const a_perm =
+		&container_of(a, struct tomoyo_unix_acl, head)->perm;
+	u8 perm = *a_perm;
+	const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm;
+
+	if (is_delete)
+		perm &= ~b_perm;
+	else
+		perm |= b_perm;
+	*a_perm = perm;
+	return !perm;
+}
+
+/**
+ * tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param)
+{
+	struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL };
+	int error = -EINVAL;
+	u8 type;
+	const char *protocol = tomoyo_read_token(param);
+	const char *operation = tomoyo_read_token(param);
+
+	for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+		if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+			break;
+	for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+		if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+			e.perm |= 1 << type;
+	if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+		return -EINVAL;
+	if (param->data[0] == '@') {
+		param->data++;
+		e.address.group =
+			tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP);
+		if (!e.address.group)
+			return -ENOMEM;
+	} else {
+		if (!tomoyo_parse_ipaddr_union(param, &e.address))
+			goto out;
+	}
+	if (!tomoyo_parse_number_union(param, &e.port) ||
+	    e.port.values[1] > 65535)
+		goto out;
+	error = tomoyo_update_domain(&e.head, sizeof(e), param,
+				     tomoyo_same_inet_acl,
+				     tomoyo_merge_inet_acl);
+out:
+	tomoyo_put_group(e.address.group);
+	tomoyo_put_number_union(&e.port);
+	return error;
+}
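+
+/*
+ * A domain policy line accepted above has the form (addresses, ports
+ * and group name below are illustrative):
+ *
+ *	network inet stream bind 10.0.0.0-10.255.255.255 1024-65535
+ *	network inet dgram send @trusted_hosts 53
+ *
+ * where "@trusted_hosts" refers to an address group defined elsewhere.
+ */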
+
+/**
+ * tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param)
+{
+	struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL };
+	int error;
+	u8 type;
+	const char *protocol = tomoyo_read_token(param);
+	const char *operation = tomoyo_read_token(param);
+
+	for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+		if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+			break;
+	for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+		if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+			e.perm |= 1 << type;
+	if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+		return -EINVAL;
+	if (!tomoyo_parse_name_union(param, &e.name))
+		return -EINVAL;
+	error = tomoyo_update_domain(&e.head, sizeof(e), param,
+				     tomoyo_same_unix_acl,
+				     tomoyo_merge_unix_acl);
+	tomoyo_put_name_union(&e.name);
+	return error;
+}
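+
+/*
+ * The unix domain counterpart accepts lines such as (path and group
+ * name illustrative):
+ *
+ *	network unix stream connect /run/example.socket
+ *	network unix dgram bind @socket_paths
+ */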
+
+/**
+ * tomoyo_audit_net_log - Audit network log.
+ *
+ * @r:         Pointer to "struct tomoyo_request_info".
+ * @family:    Name of socket family ("inet" or "unix").
+ * @protocol:  Name of protocol in @family.
+ * @operation: Name of socket operation.
+ * @address:   Name of address.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_net_log(struct tomoyo_request_info *r,
+				const char *family, const u8 protocol,
+				const u8 operation, const char *address)
+{
+	return tomoyo_supervisor(r, "network %s %s %s %s\n", family,
+				 tomoyo_proto_keyword[protocol],
+				 tomoyo_socket_keyword[operation], address);
+}
+
+/**
+ * tomoyo_audit_inet_log - Audit INET network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_inet_log(struct tomoyo_request_info *r)
+{
+	char buf[128];
+	int len;
+	const __be32 *address = r->param.inet_network.address;
+
+	if (r->param.inet_network.is_ipv6)
+		tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *)
+				  address, (const struct in6_addr *) address);
+	else
+		tomoyo_print_ipv4(buf, sizeof(buf), address, address);
+	len = strlen(buf);
+	snprintf(buf + len, sizeof(buf) - len, " %u",
+		 r->param.inet_network.port);
+	return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol,
+				    r->param.inet_network.operation, buf);
+}
+
+/**
+ * tomoyo_audit_unix_log - Audit UNIX network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_unix_log(struct tomoyo_request_info *r)
+{
+	return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol,
+				    r->param.unix_network.operation,
+				    r->param.unix_network.address->name);
+}
+
+/**
+ * tomoyo_check_inet_acl - Check permission for inet domain socket operation.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r,
+				  const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_inet_acl *acl =
+		container_of(ptr, typeof(*acl), head);
+	const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4;
+
+	if (!(acl->perm & (1 << r->param.inet_network.operation)) ||
+	    !tomoyo_compare_number_union(r->param.inet_network.port,
+					 &acl->port))
+		return false;
+	if (acl->address.group)
+		return tomoyo_address_matches_group
+			(r->param.inet_network.is_ipv6,
+			 r->param.inet_network.address, acl->address.group);
+	return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 &&
+		memcmp(&acl->address.ip[0],
+		       r->param.inet_network.address, size) <= 0 &&
+		memcmp(r->param.inet_network.address,
+		       &acl->address.ip[1], size) <= 0;
+}
+
+/**
+ * tomoyo_check_unix_acl - Check permission for unix domain socket operation.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r,
+				  const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_unix_acl *acl =
+		container_of(ptr, typeof(*acl), head);
+
+	return (acl->perm & (1 << r->param.unix_network.operation)) &&
+		tomoyo_compare_name_union(r->param.unix_network.address,
+					  &acl->name);
+}
+
+/**
+ * tomoyo_inet_entry - Check permission for INET network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inet_entry(const struct tomoyo_addr_info *address)
+{
+	const int idx = tomoyo_read_lock();
+	struct tomoyo_request_info r;
+	int error = 0;
+	const u8 type = tomoyo_inet2mac[address->protocol][address->operation];
+
+	if (type && tomoyo_init_request_info(&r, NULL, type)
+	    != TOMOYO_CONFIG_DISABLED) {
+		r.param_type = TOMOYO_TYPE_INET_ACL;
+		r.param.inet_network.protocol = address->protocol;
+		r.param.inet_network.operation = address->operation;
+		r.param.inet_network.is_ipv6 = address->inet.is_ipv6;
+		r.param.inet_network.address = address->inet.address;
+		r.param.inet_network.port = ntohs(address->inet.port);
+		do {
+			tomoyo_check_acl(&r, tomoyo_check_inet_acl);
+			error = tomoyo_audit_inet_log(&r);
+		} while (error == TOMOYO_RETRY_REQUEST);
+	}
+	tomoyo_read_unlock(idx);
+	return error;
+}
+
+/**
+ * tomoyo_check_inet_address - Check permission for an inet domain socket operation.
+ *
+ * @addr:     Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @port:     Port number.
+ * @address:  Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_inet_address(const struct sockaddr *addr,
+				     const unsigned int addr_len,
+				     const u16 port,
+				     struct tomoyo_addr_info *address)
+{
+	struct tomoyo_inet_addr_info *i = &address->inet;
+
+	switch (addr->sa_family) {
+	case AF_INET6:
+		if (addr_len < SIN6_LEN_RFC2133)
+			goto skip;
+		i->is_ipv6 = true;
+		i->address = (__be32 *)
+			((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr;
+		i->port = ((struct sockaddr_in6 *) addr)->sin6_port;
+		break;
+	case AF_INET:
+		if (addr_len < sizeof(struct sockaddr_in))
+			goto skip;
+		i->is_ipv6 = false;
+		i->address = (__be32 *)
+			&((struct sockaddr_in *) addr)->sin_addr;
+		i->port = ((struct sockaddr_in *) addr)->sin_port;
+		break;
+	default:
+		goto skip;
+	}
+	if (address->protocol == SOCK_RAW)
+		i->port = htons(port);
+	return tomoyo_inet_entry(address);
+skip:
+	return 0;
+}
+
+/**
+ * tomoyo_unix_entry - Check permission for UNIX network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_unix_entry(const struct tomoyo_addr_info *address)
+{
+	const int idx = tomoyo_read_lock();
+	struct tomoyo_request_info r;
+	int error = 0;
+	const u8 type = tomoyo_unix2mac[address->protocol][address->operation];
+
+	if (type && tomoyo_init_request_info(&r, NULL, type)
+	    != TOMOYO_CONFIG_DISABLED) {
+		char *buf = address->unix0.addr;
+		int len = address->unix0.addr_len - sizeof(sa_family_t);
+
+		if (len <= 0) {
+			buf = "anonymous";
+			len = 9;
+		} else if (buf[0]) {
+			len = strnlen(buf, len);
+		}
+		buf = tomoyo_encode2(buf, len);
+		if (buf) {
+			struct tomoyo_path_info addr;
+
+			addr.name = buf;
+			tomoyo_fill_path_info(&addr);
+			r.param_type = TOMOYO_TYPE_UNIX_ACL;
+			r.param.unix_network.protocol = address->protocol;
+			r.param.unix_network.operation = address->operation;
+			r.param.unix_network.address = &addr;
+			do {
+				tomoyo_check_acl(&r, tomoyo_check_unix_acl);
+				error = tomoyo_audit_unix_log(&r);
+			} while (error == TOMOYO_RETRY_REQUEST);
+			kfree(buf);
+		} else
+			error = -ENOMEM;
+	}
+	tomoyo_read_unlock(idx);
+	return error;
+}
+
+/**
+ * tomoyo_check_unix_address - Check permission for a unix domain socket operation.
+ *
+ * @addr:     Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @address:  Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_unix_address(struct sockaddr *addr,
+				     const unsigned int addr_len,
+				     struct tomoyo_addr_info *address)
+{
+	struct tomoyo_unix_addr_info *u = &address->unix0;
+
+	if (addr->sa_family != AF_UNIX)
+		return 0;
+	u->addr = ((struct sockaddr_un *) addr)->sun_path;
+	u->addr_len = addr_len;
+	return tomoyo_unix_entry(address);
+}
+
+/**
+ * tomoyo_kernel_service - Check whether the current thread is a kernel service.
+ *
+ * Returns true if the current thread is a kernel service, false otherwise.
+ */
+static bool tomoyo_kernel_service(void)
+{
+	/* Kernel services run with the KERNEL_DS address limit. */
+	return segment_eq(get_fs(), KERNEL_DS);
+}
+
+/**
+ * tomoyo_sock_family - Get socket's family.
+ *
+ * @sk: Pointer to "struct sock".
+ *
+ * Returns one of PF_INET, PF_INET6, PF_UNIX or 0.
+ */
+static u8 tomoyo_sock_family(struct sock *sk)
+{
+	u8 family;
+
+	if (tomoyo_kernel_service())
+		return 0;
+	family = sk->sk_family;
+	switch (family) {
+	case PF_INET:
+	case PF_INET6:
+	case PF_UNIX:
+		return family;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * tomoyo_socket_listen_permission - Check permission for listening a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_listen_permission(struct socket *sock)
+{
+	struct tomoyo_addr_info address;
+	const u8 family = tomoyo_sock_family(sock->sk);
+	const unsigned int type = sock->type;
+	struct sockaddr_storage addr;
+	int addr_len;
+
+	if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET))
+		return 0;
+	{
+		const int error = sock->ops->getname(sock, (struct sockaddr *)
+						     &addr, &addr_len, 0);
+
+		if (error)
+			return error;
+	}
+	address.protocol = type;
+	address.operation = TOMOYO_NETWORK_LISTEN;
+	if (family == PF_UNIX)
+		return tomoyo_check_unix_address((struct sockaddr *) &addr,
+						 addr_len, &address);
+	return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len,
+					 0, &address);
+}
+
+/**
+ * tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket.
+ *
+ * @sock:     Pointer to "struct socket".
+ * @addr:     Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_connect_permission(struct socket *sock,
+				     struct sockaddr *addr, int addr_len)
+{
+	struct tomoyo_addr_info address;
+	const u8 family = tomoyo_sock_family(sock->sk);
+	const unsigned int type = sock->type;
+
+	if (!family)
+		return 0;
+	address.protocol = type;
+	switch (type) {
+	case SOCK_DGRAM:
+	case SOCK_RAW:
+		address.operation = TOMOYO_NETWORK_SEND;
+		break;
+	case SOCK_STREAM:
+	case SOCK_SEQPACKET:
+		address.operation = TOMOYO_NETWORK_CONNECT;
+		break;
+	default:
+		return 0;
+	}
+	if (family == PF_UNIX)
+		return tomoyo_check_unix_address(addr, addr_len, &address);
+	return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+					 &address);
+}
+
+/**
+ * tomoyo_socket_bind_permission - Check permission for setting the local address of a socket.
+ *
+ * @sock:     Pointer to "struct socket".
+ * @addr:     Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+				  int addr_len)
+{
+	struct tomoyo_addr_info address;
+	const u8 family = tomoyo_sock_family(sock->sk);
+	const unsigned int type = sock->type;
+
+	if (!family)
+		return 0;
+	switch (type) {
+	case SOCK_STREAM:
+	case SOCK_DGRAM:
+	case SOCK_RAW:
+	case SOCK_SEQPACKET:
+		address.protocol = type;
+		address.operation = TOMOYO_NETWORK_BIND;
+		break;
+	default:
+		return 0;
+	}
+	if (family == PF_UNIX)
+		return tomoyo_check_unix_address(addr, addr_len, &address);
+	return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+					 &address);
+}
+
+/**
+ * tomoyo_socket_sendmsg_permission - Check permission for sending a datagram.
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg:  Pointer to "struct msghdr".
+ * @size: Unused.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+				     int size)
+{
+	struct tomoyo_addr_info address;
+	const u8 family = tomoyo_sock_family(sock->sk);
+	const unsigned int type = sock->type;
+
+	if (!msg->msg_name || !family ||
+	    (type != SOCK_DGRAM && type != SOCK_RAW))
+		return 0;
+	address.protocol = type;
+	address.operation = TOMOYO_NETWORK_SEND;
+	if (family == PF_UNIX)
+		return tomoyo_check_unix_address((struct sockaddr *)
+						 msg->msg_name,
+						 msg->msg_namelen, &address);
+	return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name,
+					 msg->msg_namelen,
+					 sock->sk->sk_protocol, &address);
+}
diff --git a/security/tomoyo/policy/exception_policy.conf.default b/security/tomoyo/policy/exception_policy.conf.default
new file mode 100644
index 0000000..2678df4
--- /dev/null
+++ b/security/tomoyo/policy/exception_policy.conf.default
@@ -0,0 +1,2 @@
+initialize_domain /sbin/modprobe from any
+initialize_domain /sbin/hotplug from any
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
new file mode 100644
index 0000000..5077f19
--- /dev/null
+++ b/security/tomoyo/realpath.c
@@ -0,0 +1,329 @@
+/*
+ * security/tomoyo/realpath.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/magic.h>
+
+/**
+ * tomoyo_encode2 - Encode binary string to ascii string.
+ *
+ * @str:     String in binary format.
+ * @str_len: Size of @str in byte.
+ *
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_encode2(const char *str, int str_len)
+{
+	int i;
+	int len = 0;
+	const char *p = str;
+	char *cp;
+	char *cp0;
+
+	if (!p)
+		return NULL;
+	for (i = 0; i < str_len; i++) {
+		const unsigned char c = p[i];
+
+		if (c == '\\')
+			len += 2;
+		else if (c > ' ' && c < 127)
+			len++;
+		else
+			len += 4;
+	}
+	len++;
+	/* Reserve space for appending "/". */
+	cp = kzalloc(len + 10, GFP_NOFS);
+	if (!cp)
+		return NULL;
+	cp0 = cp;
+	p = str;
+	for (i = 0; i < str_len; i++) {
+		const unsigned char c = p[i];
+
+		if (c == '\\') {
+			*cp++ = '\\';
+			*cp++ = '\\';
+		} else if (c > ' ' && c < 127) {
+			*cp++ = c;
+		} else {
+			*cp++ = '\\';
+			*cp++ = (c >> 6) + '0';
+			*cp++ = ((c >> 3) & 7) + '0';
+			*cp++ = (c & 7) + '0';
+		}
+	}
+	return cp0;
+}
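+
+/*
+ * Encoding example: the bytes "a b" become "a\040b", since ' ' (0x20)
+ * falls outside the 0x20 < c < 0x7F range kept verbatim, and a single
+ * '\' byte is doubled to "\\".
+ */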
+
+/**
+ * tomoyo_encode - Encode binary string to ascii string.
+ *
+ * @str: String in binary format.
+ *
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_encode(const char *str)
+{
+	return str ? tomoyo_encode2(str, strlen(str)) : NULL;
+}
+
+/**
+ * tomoyo_get_absolute_path - Get the path of a dentry, ignoring the chroot'ed root.
+ *
+ * @path:   Pointer to "struct path".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
+ *
+ * Returns the buffer on success, an error code otherwise.
+ *
+ * If dentry is a directory, trailing '/' is appended.
+ */
+static char *tomoyo_get_absolute_path(const struct path *path, char * const buffer,
+				      const int buflen)
+{
+	char *pos = ERR_PTR(-ENOMEM);
+	if (buflen >= 256) {
+		/* go to whatever namespace root we are under */
+		pos = d_absolute_path(path, buffer, buflen - 1);
+		if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
+			struct inode *inode = d_backing_inode(path->dentry);
+			if (inode && S_ISDIR(inode->i_mode)) {
+				buffer[buflen - 2] = '/';
+				buffer[buflen - 1] = '\0';
+			}
+		}
+	}
+	return pos;
+}
+
+/**
+ * tomoyo_get_dentry_path - Get the path of a dentry.
+ *
+ * @dentry: Pointer to "struct dentry".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
+ *
+ * Returns the buffer on success, an error code otherwise.
+ *
+ * If dentry is a directory, trailing '/' is appended.
+ */
+static char *tomoyo_get_dentry_path(struct dentry *dentry, char * const buffer,
+				    const int buflen)
+{
+	char *pos = ERR_PTR(-ENOMEM);
+	if (buflen >= 256) {
+		pos = dentry_path_raw(dentry, buffer, buflen - 1);
+		if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
+			struct inode *inode = d_backing_inode(dentry);
+			if (inode && S_ISDIR(inode->i_mode)) {
+				buffer[buflen - 2] = '/';
+				buffer[buflen - 1] = '\0';
+			}
+		}
+	}
+	return pos;
+}
+
+/**
+ * tomoyo_get_local_path - Get the path of a dentry.
+ *
+ * @dentry: Pointer to "struct dentry".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
+ *
+ * Returns the buffer on success, an error code otherwise.
+ */
+static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
+				   const int buflen)
+{
+	struct super_block *sb = dentry->d_sb;
+	char *pos = tomoyo_get_dentry_path(dentry, buffer, buflen);
+	if (IS_ERR(pos))
+		return pos;
+	/* Convert from $PID to self if $PID is current thread. */
+	if (sb->s_magic == PROC_SUPER_MAGIC && *pos == '/') {
+		char *ep;
+		const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10);
+		if (*ep == '/' && pid && pid ==
+		    task_tgid_nr_ns(current, sb->s_fs_info)) {
+			pos = ep - 5;
+			if (pos < buffer)
+				goto out;
+			memmove(pos, "/self", 5);
+		}
+		goto prepend_filesystem_name;
+	}
+	/* Use filesystem name for unnamed devices. */
+	if (!MAJOR(sb->s_dev))
+		goto prepend_filesystem_name;
+	{
+		struct inode *inode = d_backing_inode(sb->s_root);
+		/*
+		 * Use filesystem name if filesystem does not support rename()
+		 * operation.
+		 */
+		if (!inode->i_op->rename && !inode->i_op->rename2)
+			goto prepend_filesystem_name;
+	}
+	/* Prepend device name. */
+	{
+		char name[64];
+		int name_len;
+		const dev_t dev = sb->s_dev;
+		name[sizeof(name) - 1] = '\0';
+		snprintf(name, sizeof(name) - 1, "dev(%u,%u):", MAJOR(dev),
+			 MINOR(dev));
+		name_len = strlen(name);
+		pos -= name_len;
+		if (pos < buffer)
+			goto out;
+		memmove(pos, name, name_len);
+		return pos;
+	}
+	/* Prepend filesystem name. */
+prepend_filesystem_name:
+	{
+		const char *name = sb->s_type->name;
+		const int name_len = strlen(name);
+		pos -= name_len + 1;
+		if (pos < buffer)
+			goto out;
+		memmove(pos, name, name_len);
+		pos[name_len] = ':';
+	}
+	return pos;
+out:
+	return ERR_PTR(-ENOMEM);
+}
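+
+/*
+ * Naming sketch (paths illustrative): a proc entry "/1234/status",
+ * where 1234 is the caller's own tgid, becomes "proc:/self/status";
+ * an entry on an unnamed device such as tmpfs gets a filesystem-name
+ * prefix like "tmpfs:/some/file".
+ */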
+
+/**
+ * tomoyo_get_socket_name - Get the name of a socket.
+ *
+ * @path:   Pointer to "struct path".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
+ *
+ * Returns the buffer.
+ */
+static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
+				    const int buflen)
+{
+	struct inode *inode = d_backing_inode(path->dentry);
+	struct socket *sock = inode ? SOCKET_I(inode) : NULL;
+	struct sock *sk = sock ? sock->sk : NULL;
+	if (sk) {
+		snprintf(buffer, buflen, "socket:[family=%u:type=%u:"
+			 "protocol=%u]", sk->sk_family, sk->sk_type,
+			 sk->sk_protocol);
+	} else {
+		snprintf(buffer, buflen, "socket:[unknown]");
+	}
+	return buffer;
+}
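+
+/*
+ * Example output: an IPv4 TCP socket yields
+ * "socket:[family=2:type=1:protocol=6]" (AF_INET, SOCK_STREAM,
+ * IPPROTO_TCP).
+ */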
+
+/**
+ * tomoyo_realpath_from_path - Return realpath(3) of the given pathname, ignoring the chroot'ed root.
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns the realpath of the given @path on success, NULL otherwise.
+ *
+ * If dentry is a directory, a trailing '/' is appended.
+ * Characters outside the 0x20 < c < 0x7F range are converted to
+ * \ooo style octal strings.
+ * The character \ is converted to the \\ string.
+ *
+ * This function uses kzalloc(), so the caller must call kfree()
+ * if this function didn't return NULL.
+ */
+char *tomoyo_realpath_from_path(const struct path *path)
+{
+	char *buf = NULL;
+	char *name = NULL;
+	unsigned int buf_len = PAGE_SIZE / 2;
+	struct dentry *dentry = path->dentry;
+	struct super_block *sb;
+	if (!dentry)
+		return NULL;
+	sb = dentry->d_sb;
+	while (1) {
+		char *pos;
+		struct inode *inode;
+		buf_len <<= 1;
+		kfree(buf);
+		buf = kmalloc(buf_len, GFP_NOFS);
+		if (!buf)
+			break;
+		/* To make sure that pos is '\0' terminated. */
+		buf[buf_len - 1] = '\0';
+		/* Get better name for socket. */
+		if (sb->s_magic == SOCKFS_MAGIC) {
+			pos = tomoyo_get_socket_name(path, buf, buf_len - 1);
+			goto encode;
+		}
+		/* For "pipe:[\$]". */
+		if (dentry->d_op && dentry->d_op->d_dname) {
+			pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
+			goto encode;
+		}
+		inode = d_backing_inode(sb->s_root);
+		/*
+		 * Get local name for filesystems without rename() operation
+		 * or dentry without vfsmount.
+		 */
+		if (!path->mnt ||
+		    (!inode->i_op->rename && !inode->i_op->rename2))
+			pos = tomoyo_get_local_path(path->dentry, buf,
+						    buf_len - 1);
+		/* Get absolute name for the rest. */
+		else {
+			pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+			/*
+			 * Fall back to local name if absolute name is not
+			 * available.
+			 */
+			if (pos == ERR_PTR(-EINVAL))
+				pos = tomoyo_get_local_path(path->dentry, buf,
+							    buf_len - 1);
+		}
+encode:
+		if (IS_ERR(pos))
+			continue;
+		name = tomoyo_encode(pos);
+		break;
+	}
+	kfree(buf);
+	if (!name)
+		tomoyo_warn_oom(__func__);
+	return name;
+}
+
+/**
+ * tomoyo_realpath_nofollow - Get realpath of a pathname.
+ *
+ * @pathname: The pathname to solve.
+ *
+ * Returns the realpath of @pathname on success, NULL otherwise.
+ */
+char *tomoyo_realpath_nofollow(const char *pathname)
+{
+	struct path path;
+
+	if (pathname && kern_path(pathname, 0, &path) == 0) {
+		char *buf = tomoyo_realpath_from_path(&path);
+		path_put(&path);
+		return buf;
+	}
+	return NULL;
+}
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
new file mode 100644
index 0000000..179a955
--- /dev/null
+++ b/security/tomoyo/securityfs_if.c
@@ -0,0 +1,272 @@
+/*
+ * security/tomoyo/securityfs_if.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include <linux/security.h>
+#include "common.h"
+
+/**
+ * tomoyo_check_task_acl - Check permission for task operation.
+ *
+ * @r:   Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_task_acl(struct tomoyo_request_info *r,
+				  const struct tomoyo_acl_info *ptr)
+{
+	const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl),
+							 head);
+	return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
+}
+
+/**
+ * tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file:  Pointer to "struct file".
+ * @buf:   Domainname to transit to.
+ * @count: Size of @buf.
+ * @ppos:  Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ *
+ * If the domain transition was permitted but failed, this function
+ * returns an error rather than terminating the current thread with SIGKILL.
+ */
+static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	char *data;
+	int error;
+	if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10)
+		return -ENOMEM;
+	data = kzalloc(count + 1, GFP_NOFS);
+	if (!data)
+		return -ENOMEM;
+	if (copy_from_user(data, buf, count)) {
+		error = -EFAULT;
+		goto out;
+	}
+	tomoyo_normalize_line(data);
+	if (tomoyo_correct_domain(data)) {
+		const int idx = tomoyo_read_lock();
+		struct tomoyo_path_info name;
+		struct tomoyo_request_info r;
+		name.name = data;
+		tomoyo_fill_path_info(&name);
+		/* Check "task manual_domain_transition" permission. */
+		tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+		r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL;
+		r.param.task.domainname = &name;
+		tomoyo_check_acl(&r, tomoyo_check_task_acl);
+		if (!r.granted)
+			error = -EPERM;
+		else {
+			struct tomoyo_domain_info *new_domain =
+				tomoyo_assign_domain(data, true);
+			if (!new_domain) {
+				error = -ENOENT;
+			} else {
+				struct cred *cred = prepare_creds();
+				if (!cred) {
+					error = -ENOMEM;
+				} else {
+					struct tomoyo_domain_info *old_domain =
+						cred->security;
+					cred->security = new_domain;
+					atomic_inc(&new_domain->users);
+					atomic_dec(&old_domain->users);
+					commit_creds(cred);
+					error = 0;
+				}
+			}
+		}
+		tomoyo_read_unlock(idx);
+	} else
+		error = -EINVAL;
+out:
+	kfree(data);
+	return error ? error : count;
+}
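+
+/*
+ * Usage sketch (domainname illustrative): a process holding
+ * "task manual_domain_transition" permission can switch domains with
+ *
+ *	echo '<kernel> /usr/sbin/sshd' > /sys/kernel/security/tomoyo/self_domain
+ */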
+
+/**
+ * tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file:  Pointer to "struct file".
+ * @buf:   Domainname to which the current thread belongs.
+ * @count: Size of @buf.
+ * @ppos:  Bytes read by now.
+ *
+ * Returns read size on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read_self(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	const char *domain = tomoyo_domain()->domainname->name;
+	loff_t len = strlen(domain);
+	loff_t pos = *ppos;
+	if (pos >= len || !count)
+		return 0;
+	len -= pos;
+	if (count < len)
+		len = count;
+	if (copy_to_user(buf, domain + pos, len))
+		return -EFAULT;
+	*ppos += len;
+	return len;
+}
+
+/* Operations for /sys/kernel/security/tomoyo/self_domain interface. */
+static const struct file_operations tomoyo_self_operations = {
+	.write = tomoyo_write_self,
+	.read  = tomoyo_read_self,
+};
+
+/**
+ * tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @inode: Pointer to "struct inode".
+ * @file:  Pointer to "struct file".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_open(struct inode *inode, struct file *file)
+{
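+	/* i_private stores the key encoded as a pointer offset. */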
+	const int key = ((u8 *) file_inode(file)->i_private)
+		- ((u8 *) NULL);
+	return tomoyo_open_control(key, file);
+}
+
+/**
+ * tomoyo_release - close() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @inode: Pointer to "struct inode".
+ * @file:  Pointer to "struct file".
+ *
+ * Returns 0.
+ */
+static int tomoyo_release(struct inode *inode, struct file *file)
+{
+	tomoyo_close_control(file->private_data);
+	return 0;
+}
+
+/**
+ * tomoyo_poll - poll() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". May be NULL.
+ *
+ * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
+ * POLLOUT | POLLWRNORM otherwise.
+ */
+static unsigned int tomoyo_poll(struct file *file, poll_table *wait)
+{
+	return tomoyo_poll_control(file, wait);
+}
+
+/**
+ * tomoyo_read - read() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file:  Pointer to "struct file".
+ * @buf:   Pointer to buffer.
+ * @count: Size of @buf.
+ * @ppos:  Unused.
+ *
+ * Returns bytes read on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count,
+			   loff_t *ppos)
+{
+	return tomoyo_read_control(file->private_data, buf, count);
+}
+
+/**
+ * tomoyo_write - write() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file:  Pointer to "struct file".
+ * @buf:   Pointer to buffer.
+ * @count: Size of @buf.
+ * @ppos:  Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ */
+static ssize_t tomoyo_write(struct file *file, const char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	return tomoyo_write_control(file->private_data, buf, count);
+}
+
+/*
+ * tomoyo_operations is a "struct file_operations" which is used for handling
+ * /sys/kernel/security/tomoyo/ interface.
+ *
+ * Some files under /sys/kernel/security/tomoyo/ directory accept open(O_RDWR).
+ * See tomoyo_io_buffer for internals.
+ */
+static const struct file_operations tomoyo_operations = {
+	.open    = tomoyo_open,
+	.release = tomoyo_release,
+	.poll    = tomoyo_poll,
+	.read    = tomoyo_read,
+	.write   = tomoyo_write,
+	.llseek  = noop_llseek,
+};
+
+/**
+ * tomoyo_create_entry - Create interface files under /sys/kernel/security/tomoyo/ directory.
+ *
+ * @name:   The name of the interface file.
+ * @mode:   The permission of the interface file.
+ * @parent: The parent directory.
+ * @key:    Type of interface.
+ *
+ * Returns nothing.
+ */
+static void __init tomoyo_create_entry(const char *name, const umode_t mode,
+				       struct dentry *parent, const u8 key)
+{
+	securityfs_create_file(name, mode, parent, ((u8 *) NULL) + key,
+			       &tomoyo_operations);
+}
+
+/**
+ * tomoyo_interface_init - Initialize /sys/kernel/security/tomoyo/ interface.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_interface_init(void)
+{
+	struct dentry *tomoyo_dir;
+
+	/* Don't create securityfs entries unless registered. */
+	if (current_cred()->security != &tomoyo_kernel_domain)
+		return 0;
+
+	tomoyo_dir = securityfs_create_dir("tomoyo", NULL);
+	tomoyo_create_entry("query",            0600, tomoyo_dir,
+			    TOMOYO_QUERY);
+	tomoyo_create_entry("domain_policy",    0600, tomoyo_dir,
+			    TOMOYO_DOMAINPOLICY);
+	tomoyo_create_entry("exception_policy", 0600, tomoyo_dir,
+			    TOMOYO_EXCEPTIONPOLICY);
+	tomoyo_create_entry("audit",            0400, tomoyo_dir,
+			    TOMOYO_AUDIT);
+	tomoyo_create_entry(".process_status",  0600, tomoyo_dir,
+			    TOMOYO_PROCESS_STATUS);
+	tomoyo_create_entry("stat",             0644, tomoyo_dir,
+			    TOMOYO_STAT);
+	tomoyo_create_entry("profile",          0600, tomoyo_dir,
+			    TOMOYO_PROFILE);
+	tomoyo_create_entry("manager",          0600, tomoyo_dir,
+			    TOMOYO_MANAGER);
+	tomoyo_create_entry("version",          0400, tomoyo_dir,
+			    TOMOYO_VERSION);
+	securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL,
+			       &tomoyo_self_operations);
+	tomoyo_load_builtin_policy();
+	return 0;
+}
+
+fs_initcall(tomoyo_interface_init);
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
new file mode 100644
index 0000000..cbf3df4
--- /dev/null
+++ b/security/tomoyo/tomoyo.c
@@ -0,0 +1,552 @@
+/*
+ * security/tomoyo/tomoyo.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include <linux/lsm_hooks.h>
+#include "common.h"
+
+/**
+ * tomoyo_cred_alloc_blank - Target for security_cred_alloc_blank().
+ *
+ * @new: Pointer to "struct cred".
+ * @gfp: Memory allocation flags.
+ *
+ * Returns 0.
+ */
+static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
+{
+	new->security = NULL;
+	return 0;
+}
+
+/**
+ * tomoyo_cred_prepare - Target for security_prepare_creds().
+ *
+ * @new: Pointer to "struct cred".
+ * @old: Pointer to "struct cred".
+ * @gfp: Memory allocation flags.
+ *
+ * Returns 0.
+ */
+static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
+			       gfp_t gfp)
+{
+	struct tomoyo_domain_info *domain = old->security;
+	new->security = domain;
+	if (domain)
+		atomic_inc(&domain->users);
+	return 0;
+}
+
+/**
+ * tomoyo_cred_transfer - Target for security_transfer_creds().
+ *
+ * @new: Pointer to "struct cred".
+ * @old: Pointer to "struct cred".
+ */
+static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
+{
+	tomoyo_cred_prepare(new, old, 0);
+}
+
+/**
+ * tomoyo_cred_free - Target for security_cred_free().
+ *
+ * @cred: Pointer to "struct cred".
+ */
+static void tomoyo_cred_free(struct cred *cred)
+{
+	struct tomoyo_domain_info *domain = cred->security;
+	if (domain)
+		atomic_dec(&domain->users);
+}
+
+/**
+ * tomoyo_bprm_set_creds - Target for security_bprm_set_creds().
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
+{
+	/*
+	 * Do nothing unless this is the first call of this function for an
+	 * execve operation.
+	 */
+	if (bprm->cred_prepared)
+		return 0;
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+	/*
+	 * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
+	 * for the first time.
+	 */
+	if (!tomoyo_policy_loaded)
+		tomoyo_load_policy(bprm->filename);
+#endif
+	/*
+	 * Release reference to "struct tomoyo_domain_info" stored inside
+	 * "bprm->cred->security". New reference to "struct tomoyo_domain_info"
+	 * stored inside "bprm->cred->security" will be acquired later inside
+	 * tomoyo_find_next_domain().
+	 */
+	atomic_dec(&((struct tomoyo_domain_info *)
+		     bprm->cred->security)->users);
+	/*
+	 * Tell tomoyo_bprm_check_security() that it is called for the first
+	 * time of an execve operation.
+	 */
+	bprm->cred->security = NULL;
+	return 0;
+}
+
+/**
+ * tomoyo_bprm_check_security - Target for security_bprm_check().
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
+{
+	struct tomoyo_domain_info *domain = bprm->cred->security;
+
+	/*
+	 * Execute permission is checked against pathname passed to do_execve()
+	 * using current domain.
+	 */
+	if (!domain) {
+		const int idx = tomoyo_read_lock();
+		const int err = tomoyo_find_next_domain(bprm);
+		tomoyo_read_unlock(idx);
+		return err;
+	}
+	/*
+	 * Read permission is checked against interpreters using next domain.
+	 */
+	return tomoyo_check_open_permission(domain, &bprm->file->f_path,
+					    O_RDONLY);
+}
+
+/**
+ * tomoyo_inode_getattr - Target for security_inode_getattr().
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inode_getattr(const struct path *path)
+{
+	return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL);
+}
+
+/**
+ * tomoyo_path_truncate - Target for security_path_truncate().
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_truncate(struct path *path)
+{
+	return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL);
+}
+
+/**
+ * tomoyo_path_unlink - Target for security_path_unlink().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
+{
+	struct path path = { parent->mnt, dentry };
+	return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
+}
+
+/**
+ * tomoyo_path_mkdir - Target for security_path_mkdir().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ * @mode:   DAC permission mode.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
+			     umode_t mode)
+{
+	struct path path = { parent->mnt, dentry };
+	return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
+				       mode & S_IALLUGO);
+}
+
+/**
+ * tomoyo_path_rmdir - Target for security_path_rmdir().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
+{
+	struct path path = { parent->mnt, dentry };
+	return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
+}
+
+/**
+ * tomoyo_path_symlink - Target for security_path_symlink().
+ *
+ * @parent:   Pointer to "struct path".
+ * @dentry:   Pointer to "struct dentry".
+ * @old_name: Symlink's content.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
+			       const char *old_name)
+{
+	struct path path = { parent->mnt, dentry };
+	return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name);
+}
+
+/**
+ * tomoyo_path_mknod - Target for security_path_mknod().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ * @mode:   DAC permission mode.
+ * @dev:    Device attributes.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
+			     umode_t mode, unsigned int dev)
+{
+	struct path path = { parent->mnt, dentry };
+	int type = TOMOYO_TYPE_CREATE;
+	const unsigned int perm = mode & S_IALLUGO;
+
+	switch (mode & S_IFMT) {
+	case S_IFCHR:
+		type = TOMOYO_TYPE_MKCHAR;
+		break;
+	case S_IFBLK:
+		type = TOMOYO_TYPE_MKBLOCK;
+		break;
+	default:
+		goto no_dev;
+	}
+	return tomoyo_mkdev_perm(type, &path, perm, dev);
+ no_dev:
+	switch (mode & S_IFMT) {
+	case S_IFIFO:
+		type = TOMOYO_TYPE_MKFIFO;
+		break;
+	case S_IFSOCK:
+		type = TOMOYO_TYPE_MKSOCK;
+		break;
+	}
+	return tomoyo_path_number_perm(type, &path, perm);
+}
+
+/**
+ * tomoyo_path_link - Target for security_path_link().
+ *
+ * @old_dentry: Pointer to "struct dentry".
+ * @new_dir:    Pointer to "struct path".
+ * @new_dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
+			    struct dentry *new_dentry)
+{
+	struct path path1 = { new_dir->mnt, old_dentry };
+	struct path path2 = { new_dir->mnt, new_dentry };
+	return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
+}
+
+/**
+ * tomoyo_path_rename - Target for security_path_rename().
+ *
+ * @old_parent: Pointer to "struct path".
+ * @old_dentry: Pointer to "struct dentry".
+ * @new_parent: Pointer to "struct path".
+ * @new_dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_rename(struct path *old_parent,
+			      struct dentry *old_dentry,
+			      struct path *new_parent,
+			      struct dentry *new_dentry)
+{
+	struct path path1 = { old_parent->mnt, old_dentry };
+	struct path path2 = { new_parent->mnt, new_dentry };
+	return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
+}
+
+/**
+ * tomoyo_file_fcntl - Target for security_file_fcntl().
+ *
+ * @file: Pointer to "struct file".
+ * @cmd:  Command for fcntl().
+ * @arg:  Argument for @cmd.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND)))
+		return 0;
+	return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path,
+					    O_WRONLY | (arg & O_APPEND));
+}
+
+/**
+ * tomoyo_file_open - Target for security_file_open().
+ *
+ * @f:    Pointer to "struct file".
+ * @cred: Pointer to "struct cred".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_file_open(struct file *f, const struct cred *cred)
+{
+	int flags = f->f_flags;
+	/* Don't check read permission here if called from do_execve(). */
+	if (current->in_execve)
+		return 0;
+	return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
+}
+
+/**
+ * tomoyo_file_ioctl - Target for security_file_ioctl().
+ *
+ * @file: Pointer to "struct file".
+ * @cmd:  Command for ioctl().
+ * @arg:  Argument for @cmd.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	return tomoyo_path_number_perm(TOMOYO_TYPE_IOCTL, &file->f_path, cmd);
+}
+
+/**
+ * tomoyo_path_chmod - Target for security_path_chmod().
+ *
+ * @path: Pointer to "struct path".
+ * @mode: DAC permission mode.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chmod(struct path *path, umode_t mode)
+{
+	return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path,
+				       mode & S_IALLUGO);
+}
+
+/**
+ * tomoyo_path_chown - Target for security_path_chown().
+ *
+ * @path: Pointer to "struct path".
+ * @uid:  Owner ID.
+ * @gid:  Group ID.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+{
+	int error = 0;
+	if (uid_valid(uid))
+		error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path,
+						from_kuid(&init_user_ns, uid));
+	if (!error && gid_valid(gid))
+		error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path,
+						from_kgid(&init_user_ns, gid));
+	return error;
+}
+
+/**
+ * tomoyo_path_chroot - Target for security_path_chroot().
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chroot(struct path *path)
+{
+	return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL);
+}
+
+/**
+ * tomoyo_sb_mount - Target for security_sb_mount().
+ *
+ * @dev_name: Name of device file. May be NULL.
+ * @path:     Pointer to "struct path".
+ * @type:     Name of filesystem type. May be NULL.
+ * @flags:    Mount options.
+ * @data:     Optional data. May be NULL.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_mount(const char *dev_name, struct path *path,
+			   const char *type, unsigned long flags, void *data)
+{
+	return tomoyo_mount_permission(dev_name, path, type, flags, data);
+}
+
+/**
+ * tomoyo_sb_umount - Target for security_sb_umount().
+ *
+ * @mnt:   Pointer to "struct vfsmount".
+ * @flags: Unmount options.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
+{
+	struct path path = { mnt, mnt->mnt_root };
+	return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
+}
+
+/**
+ * tomoyo_sb_pivotroot - Target for security_sb_pivotroot().
+ *
+ * @old_path: Pointer to "struct path".
+ * @new_path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
+{
+	return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
+}
+
+/**
+ * tomoyo_socket_listen - Check permission for listen().
+ *
+ * @sock:    Pointer to "struct socket".
+ * @backlog: Backlog parameter.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_listen(struct socket *sock, int backlog)
+{
+	return tomoyo_socket_listen_permission(sock);
+}
+
+/**
+ * tomoyo_socket_connect - Check permission for connect().
+ *
+ * @sock:     Pointer to "struct socket".
+ * @addr:     Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr,
+				 int addr_len)
+{
+	return tomoyo_socket_connect_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_bind - Check permission for bind().
+ *
+ * @sock:     Pointer to "struct socket".
+ * @addr:     Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr,
+			      int addr_len)
+{
+	return tomoyo_socket_bind_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_sendmsg - Check permission for sendmsg().
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg:  Pointer to "struct msghdr".
+ * @size: Size of message.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+				 int size)
+{
+	return tomoyo_socket_sendmsg_permission(sock, msg, size);
+}
+
+/*
+ * tomoyo_hooks is an array of "struct security_hook_list" which is used
+ * for registering TOMOYO.
+ */
+static struct security_hook_list tomoyo_hooks[] = {
+	LSM_HOOK_INIT(cred_alloc_blank, tomoyo_cred_alloc_blank),
+	LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
+	LSM_HOOK_INIT(cred_transfer, tomoyo_cred_transfer),
+	LSM_HOOK_INIT(cred_free, tomoyo_cred_free),
+	LSM_HOOK_INIT(bprm_set_creds, tomoyo_bprm_set_creds),
+	LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
+	LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
+	LSM_HOOK_INIT(file_open, tomoyo_file_open),
+	LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
+	LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
+	LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
+	LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
+	LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
+	LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
+	LSM_HOOK_INIT(path_link, tomoyo_path_link),
+	LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
+	LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
+	LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
+	LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
+	LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
+	LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
+	LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
+	LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
+	LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
+	LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
+	LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
+	LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
+	LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
+};
+
+/* Lock for GC. */
+DEFINE_SRCU(tomoyo_ss);
+
+/**
+ * tomoyo_init - Register TOMOYO Linux as a LSM module.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_init(void)
+{
+	struct cred *cred = (struct cred *) current_cred();
+
+	if (!security_module_enable("tomoyo"))
+		return 0;
+	/* register ourselves with the security framework */
+	security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks));
+	printk(KERN_INFO "TOMOYO Linux initialized\n");
+	cred->security = &tomoyo_kernel_domain;
+	tomoyo_mm_init();
+	return 0;
+}
+
+security_initcall(tomoyo_init);
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
new file mode 100644
index 0000000..b974a69
--- /dev/null
+++ b/security/tomoyo/util.c
@@ -0,0 +1,1088 @@
+/*
+ * security/tomoyo/util.c
+ *
+ * Copyright (C) 2005-2011  NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include "common.h"
+
+/* Lock for protecting policy. */
+DEFINE_MUTEX(tomoyo_policy_lock);
+
+/* Has /sbin/init started? */
+bool tomoyo_policy_loaded;
+
+/*
+ * Mapping table from "enum tomoyo_mac_index" to
+ * "enum tomoyo_mac_category_index".
+ */
+const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
+	/* CONFIG::file group */
+	[TOMOYO_MAC_FILE_EXECUTE]    = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_OPEN]       = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_CREATE]     = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_UNLINK]     = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_GETATTR]    = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_MKDIR]      = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_RMDIR]      = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_MKFIFO]     = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_MKSOCK]     = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_TRUNCATE]   = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_SYMLINK]    = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_MKBLOCK]    = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_MKCHAR]     = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_LINK]       = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_RENAME]     = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_CHMOD]      = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_CHOWN]      = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_CHGRP]      = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_IOCTL]      = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_CHROOT]     = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_MOUNT]      = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_UMOUNT]     = TOMOYO_MAC_CATEGORY_FILE,
+	[TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE,
+	/* CONFIG::network group */
+	[TOMOYO_MAC_NETWORK_INET_STREAM_BIND]       =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN]     =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT]    =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_INET_DGRAM_BIND]        =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_INET_DGRAM_SEND]        =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_INET_RAW_BIND]          =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_INET_RAW_SEND]          =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND]       =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN]     =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT]    =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND]        =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND]        =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND]    =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN]  =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] =
+	TOMOYO_MAC_CATEGORY_NETWORK,
+	/* CONFIG::misc group */
+	[TOMOYO_MAC_ENVIRON]         = TOMOYO_MAC_CATEGORY_MISC,
+};
+
+/**
+ * tomoyo_convert_time - Convert time_t to YYYY/MM/DD hh:mm:ss.
+ *
+ * @time:  Seconds since 1970/01/01 00:00:00.
+ * @stamp: Pointer to "struct tomoyo_time".
+ *
+ * Returns nothing.
+ *
+ * This function does not handle the Y2038 problem.
+ */
+void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp)
+{
+	static const u16 tomoyo_eom[2][12] = {
+		{ 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+		{ 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+	};
+	u16 y;
+	u8 m;
+	bool r;
+	stamp->sec = time % 60;
+	time /= 60;
+	stamp->min = time % 60;
+	time /= 60;
+	stamp->hour = time % 24;
+	time /= 24;
+	for (y = 1970; ; y++) {
+		const unsigned short days = (y & 3) ? 365 : 366;
+		if (time < days)
+			break;
+		time -= days;
+	}
+	r = (y & 3) == 0;
+	for (m = 0; m < 11 && time >= tomoyo_eom[r][m]; m++)
+		;
+	if (m)
+		time -= tomoyo_eom[r][m - 1];
+	stamp->year = y;
+	stamp->month = ++m;
+	stamp->day = ++time;
+}
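+
+/*
+ * Worked example: time = 86399 (one second before 1970/01/02) yields
+ * year = 1970, month = 1, day = 1, hour = 23, min = 59, sec = 59.
+ */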
+
+/**
+ * tomoyo_permstr - Find permission keywords.
+ *
+ * @string: String representation for permissions in foo/bar/buz format.
+ * @keyword: Keyword to find in @string.
+ *
+ * Returns true if @keyword was found in @string, false otherwise.
+ *
+ * This function assumes that strncmp(w1, w2, strlen(w1)) != 0 if w1 != w2.
+ */
+bool tomoyo_permstr(const char *string, const char *keyword)
+{
+	const char *cp = strstr(string, keyword);
+	if (cp)
+		return cp == string || *(cp - 1) == '/';
+	return false;
+}
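+
+/*
+ * Examples: tomoyo_permstr("bind/listen", "listen") is true because the
+ * match follows a '/'; tomoyo_permstr("unlisten", "listen") is false
+ * because the match is neither at the start nor after a '/'.
+ */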
+
+/**
+ * tomoyo_read_token - Read a word from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a word on success, "" otherwise.
+ *
+ * To allow the caller to skip a NULL check, this function returns "" rather
+ * than NULL if there are no more words to read.
+ */
+char *tomoyo_read_token(struct tomoyo_acl_param *param)
+{
+	char *pos = param->data;
+	char *del = strchr(pos, ' ');
+	if (del)
+		*del++ = '\0';
+	else
+		del = pos + strlen(pos);
+	param->data = del;
+	return pos;
+}
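+
+/*
+ * Example: with param->data == "inet stream bind", the first call
+ * returns "inet" and leaves param->data pointing at "stream bind";
+ * calling on an empty string returns "".
+ */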
+
+/**
+ * tomoyo_get_domainname - Read a domainname from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a domainname on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param)
+{
+	char *start = param->data;
+	char *pos = start;
+	while (*pos) {
+		if (*pos++ != ' ' || *pos++ == '/')
+			continue;
+		pos -= 2;
+		*pos++ = '\0';
+		break;
+	}
+	param->data = pos;
+	if (tomoyo_correct_domain(start))
+		return tomoyo_get_name(start);
+	return NULL;
+}
+
+/**
+ * tomoyo_parse_ulong - Parse an "unsigned long" value.
+ *
+ * @result: Pointer to "unsigned long".
+ * @str:    Pointer to string to parse.
+ *
+ * Returns one of the values in "enum tomoyo_value_type".
+ *
+ * On success, @str is updated to point to the first character after
+ * the value.
+ */
+u8 tomoyo_parse_ulong(unsigned long *result, char **str)
+{
+	const char *cp = *str;
+	char *ep;
+	int base = 10;
+	if (*cp == '0') {
+		char c = *(cp + 1);
+		if (c == 'x' || c == 'X') {
+			base = 16;
+			cp += 2;
+		} else if (c >= '0' && c <= '7') {
+			base = 8;
+			cp++;
+		}
+	}
+	*result = simple_strtoul(cp, &ep, base);
+	if (cp == ep)
+		return TOMOYO_VALUE_TYPE_INVALID;
+	*str = ep;
+	switch (base) {
+	case 16:
+		return TOMOYO_VALUE_TYPE_HEXADECIMAL;
+	case 8:
+		return TOMOYO_VALUE_TYPE_OCTAL;
+	default:
+		return TOMOYO_VALUE_TYPE_DECIMAL;
+	}
+}
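+
+/*
+ * Illustrative example: for *str == "0x10-0x20" the first call sets
+ * *result to 16, returns TOMOYO_VALUE_TYPE_HEXADECIMAL and leaves *str
+ * pointing at "-0x20"; "0755" parses as octal 493 and "42" as
+ * decimal 42.
+ */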
+
+/**
+ * tomoyo_print_ulong - Print an "unsigned long" value.
+ *
+ * @buffer:     Pointer to buffer.
+ * @buffer_len: Size of @buffer.
+ * @value:      An "unsigned long" value.
+ * @type:       Type of @value.
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ulong(char *buffer, const int buffer_len,
+			const unsigned long value, const u8 type)
+{
+	if (type == TOMOYO_VALUE_TYPE_DECIMAL)
+		snprintf(buffer, buffer_len, "%lu", value);
+	else if (type == TOMOYO_VALUE_TYPE_OCTAL)
+		snprintf(buffer, buffer_len, "0%lo", value);
+	else if (type == TOMOYO_VALUE_TYPE_HEXADECIMAL)
+		snprintf(buffer, buffer_len, "0x%lX", value);
+	else
+		snprintf(buffer, buffer_len, "type(%u)", type);
+}
+
+/**
+ * tomoyo_parse_name_union - Parse a tomoyo_name_union.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr:   Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
+			     struct tomoyo_name_union *ptr)
+{
+	char *filename;
+	if (param->data[0] == '@') {
+		param->data++;
+		ptr->group = tomoyo_get_group(param, TOMOYO_PATH_GROUP);
+		return ptr->group != NULL;
+	}
+	filename = tomoyo_read_token(param);
+	if (!tomoyo_correct_word(filename))
+		return false;
+	ptr->filename = tomoyo_get_name(filename);
+	return ptr->filename != NULL;
+}
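+
+/*
+ * Illustrative example: a plain token such as "/etc/fstab" is stored as
+ * a filename, while a token with a leading '@' (say "@HOME_DIR", a name
+ * chosen here only for illustration) is resolved as a path_group
+ * reference via tomoyo_get_group().
+ */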
+
+/**
+ * tomoyo_parse_number_union - Parse a tomoyo_number_union.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr:   Pointer to "struct tomoyo_number_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
+			       struct tomoyo_number_union *ptr)
+{
+	char *data;
+	u8 type;
+	unsigned long v;
+	memset(ptr, 0, sizeof(*ptr));
+	if (param->data[0] == '@') {
+		param->data++;
+		ptr->group = tomoyo_get_group(param, TOMOYO_NUMBER_GROUP);
+		return ptr->group != NULL;
+	}
+	data = tomoyo_read_token(param);
+	type = tomoyo_parse_ulong(&v, &data);
+	if (type == TOMOYO_VALUE_TYPE_INVALID)
+		return false;
+	ptr->values[0] = v;
+	ptr->value_type[0] = type;
+	if (!*data) {
+		ptr->values[1] = v;
+		ptr->value_type[1] = type;
+		return true;
+	}
+	if (*data++ != '-')
+		return false;
+	type = tomoyo_parse_ulong(&v, &data);
+	if (type == TOMOYO_VALUE_TYPE_INVALID || *data || ptr->values[0] > v)
+		return false;
+	ptr->values[1] = v;
+	ptr->value_type[1] = type;
+	return true;
+}
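+
+/*
+ * Illustrative example: the token "1-10" stores values[0] = 1 and
+ * values[1] = 10; a single token such as "0644" stores the same octal
+ * value in both slots; "10-1" is rejected because the range is
+ * reversed.
+ */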
+
+/**
+ * tomoyo_byte_range - Check whether the string is a \ooo style octal value.
+ *
+ * @str: Pointer to the string.
+ *
+ * Returns true if @str is a \ooo style octal value, false otherwise.
+ *
+ * TOMOYO uses \ooo style representation for 0x01 - 0x20 and 0x7F - 0xFF.
+ * This function verifies that \ooo is in valid range.
+ */
+static inline bool tomoyo_byte_range(const char *str)
+{
+	return *str >= '0' && *str++ <= '3' &&
+		*str >= '0' && *str++ <= '7' &&
+		*str >= '0' && *str <= '7';
+}
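+
+/*
+ * Illustrative example: tomoyo_byte_range("177") is true (\177 is
+ * 0x7F), while tomoyo_byte_range("477") is false because the first
+ * digit may only be '0'-'3', capping values at \377 == 0xFF.
+ */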
+
+/**
+ * tomoyo_alphabet_char - Check whether the character is alphabetic.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is an alphabetic character, false otherwise.
+ */
+static inline bool tomoyo_alphabet_char(const char c)
+{
+	return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+}
+
+/**
+ * tomoyo_make_byte - Make byte value from three octal characters.
+ *
+ * @c1: The first character.
+ * @c2: The second character.
+ * @c3: The third character.
+ *
+ * Returns byte value.
+ */
+static inline u8 tomoyo_make_byte(const u8 c1, const u8 c2, const u8 c3)
+{
+	return ((c1 - '0') << 6) + ((c2 - '0') << 3) + (c3 - '0');
+}
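+
+/*
+ * Illustrative example: tomoyo_make_byte('1', '7', '7') computes
+ * (1 << 6) + (7 << 3) + 7 = 127, i.e. the byte that "\177" encodes.
+ */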
+
+/**
+ * tomoyo_valid - Check whether the character is a valid char.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is a valid character, false otherwise.
+ */
+static inline bool tomoyo_valid(const unsigned char c)
+{
+	return c > ' ' && c < 127;
+}
+
+/**
+ * tomoyo_invalid - Check whether the character is an invalid char.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is an invalid character, false otherwise.
+ */
+static inline bool tomoyo_invalid(const unsigned char c)
+{
+	return c && (c <= ' ' || c >= 127);
+}
+
+/**
+ * tomoyo_str_starts - Check whether the given string starts with the given keyword.
+ *
+ * @src:  Pointer to pointer to the string.
+ * @find: Pointer to the keyword.
+ *
+ * Returns true if @src starts with @find, false otherwise.
+ *
+ * If @src starts with @find, @src is updated to point to the first
+ * character after @find.
+ */
+bool tomoyo_str_starts(char **src, const char *find)
+{
+	const int len = strlen(find);
+	char *tmp = *src;
+
+	if (strncmp(tmp, find, len))
+		return false;
+	tmp += len;
+	*src = tmp;
+	return true;
+}
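+
+/*
+ * Illustrative example: with *src == "delete file read", calling
+ * tomoyo_str_starts(&src, "delete ") returns true and advances src to
+ * "file read".
+ */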
+
+/**
+ * tomoyo_normalize_line - Normalize a line.
+ *
+ * @buffer: The line to normalize.
+ *
+ * Leading and trailing whitespace is removed, and runs of whitespace
+ * are collapsed into a single space.
+ *
+ * Returns nothing.
+ */
+void tomoyo_normalize_line(unsigned char *buffer)
+{
+	unsigned char *sp = buffer;
+	unsigned char *dp = buffer;
+	bool first = true;
+
+	while (tomoyo_invalid(*sp))
+		sp++;
+	while (*sp) {
+		if (!first)
+			*dp++ = ' ';
+		first = false;
+		while (tomoyo_valid(*sp))
+			*dp++ = *sp++;
+		while (tomoyo_invalid(*sp))
+			sp++;
+	}
+	*dp = '\0';
+}
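+
+/*
+ * Illustrative example: "  file   read\t/etc/fstab  " is rewritten in
+ * place to "file read /etc/fstab".
+ */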
+
+/**
+ * tomoyo_correct_word2 - Validate a string.
+ *
+ * @string: The string to check. May not be '\0'-terminated.
+ * @len:    Length of @string.
+ *
+ * Check whether the given string follows the naming rules.
+ * Returns true if @string follows the naming rules, false otherwise.
+ */
+static bool tomoyo_correct_word2(const char *string, size_t len)
+{
+	const char *const start = string;
+	bool in_repetition = false;
+	unsigned char c;
+	unsigned char d;
+	unsigned char e;
+	if (!len)
+		goto out;
+	while (len--) {
+		c = *string++;
+		if (c == '\\') {
+			if (!len--)
+				goto out;
+			c = *string++;
+			switch (c) {
+			case '\\':  /* "\\" */
+				continue;
+			case '$':   /* "\$" */
+			case '+':   /* "\+" */
+			case '?':   /* "\?" */
+			case '*':   /* "\*" */
+			case '@':   /* "\@" */
+			case 'x':   /* "\x" */
+			case 'X':   /* "\X" */
+			case 'a':   /* "\a" */
+			case 'A':   /* "\A" */
+			case '-':   /* "\-" */
+				continue;
+			case '{':   /* "/\{" */
+				if (string - 3 < start || *(string - 3) != '/')
+					break;
+				in_repetition = true;
+				continue;
+			case '}':   /* "\}/" */
+				if (*string != '/')
+					break;
+				if (!in_repetition)
+					break;
+				in_repetition = false;
+				continue;
+			case '0':   /* "\ooo" */
+			case '1':
+			case '2':
+			case '3':
+				if (!len-- || !len--)
+					break;
+				d = *string++;
+				e = *string++;
+				if (d < '0' || d > '7' || e < '0' || e > '7')
+					break;
+				c = tomoyo_make_byte(c, d, e);
+				if (c <= ' ' || c >= 127)
+					continue;
+			}
+			goto out;
+		} else if (in_repetition && c == '/') {
+			goto out;
+		} else if (c <= ' ' || c >= 127) {
+			goto out;
+		}
+	}
+	if (in_repetition)
+		goto out;
+	return true;
+ out:
+	return false;
+}
+
+/**
+ * tomoyo_correct_word - Validate a string.
+ *
+ * @string: The string to check.
+ *
+ * Check whether the given string follows the naming rules.
+ * Returns true if @string follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_word(const char *string)
+{
+	return tomoyo_correct_word2(string, strlen(string));
+}
+
+/**
+ * tomoyo_correct_path - Validate a pathname.
+ *
+ * @filename: The pathname to check.
+ *
+ * Check whether the given pathname follows the naming rules.
+ * Returns true if @filename follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_path(const char *filename)
+{
+	return *filename == '/' && tomoyo_correct_word(filename);
+}
+
+/**
+ * tomoyo_correct_domain - Check whether the given domainname follows the naming rules.
+ *
+ * @domainname: The domainname to check.
+ *
+ * Returns true if @domainname follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_domain(const unsigned char *domainname)
+{
+	if (!domainname || !tomoyo_domain_def(domainname))
+		return false;
+	domainname = strchr(domainname, ' ');
+	if (!domainname++)
+		return true;
+	while (1) {
+		const unsigned char *cp = strchr(domainname, ' ');
+		if (!cp)
+			break;
+		if (*domainname != '/' ||
+		    !tomoyo_correct_word2(domainname, cp - domainname))
+			return false;
+		domainname = cp + 1;
+	}
+	return tomoyo_correct_path(domainname);
+}
+
+/**
+ * tomoyo_domain_def - Check whether the given token can be a domainname.
+ *
+ * @buffer: The token to check.
+ *
+ * Returns true if @buffer could be a domainname, false otherwise.
+ */
+bool tomoyo_domain_def(const unsigned char *buffer)
+{
+	const unsigned char *cp;
+	int len;
+	if (*buffer != '<')
+		return false;
+	cp = strchr(buffer, ' ');
+	if (!cp)
+		len = strlen(buffer);
+	else
+		len = cp - buffer;
+	if (buffer[len - 1] != '>' ||
+	    !tomoyo_correct_word2(buffer + 1, len - 2))
+		return false;
+	return true;
+}
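+
+/*
+ * Illustrative example: tomoyo_domain_def("<kernel>") and
+ * tomoyo_domain_def("<kernel> /sbin/init") are true (only the token up
+ * to the first space is inspected), while
+ * tomoyo_domain_def("/sbin/init") is false because the token does not
+ * start with '<'.
+ */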
+
+/**
+ * tomoyo_find_domain - Find a domain by the given name.
+ *
+ * @domainname: The domainname to find.
+ *
+ * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
+{
+	struct tomoyo_domain_info *domain;
+	struct tomoyo_path_info name;
+
+	name.name = domainname;
+	tomoyo_fill_path_info(&name);
+	list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+		if (!domain->is_deleted &&
+		    !tomoyo_pathcmp(&name, domain->domainname))
+			return domain;
+	}
+	return NULL;
+}
+
+/**
+ * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token.
+ *
+ * @filename: The string to evaluate.
+ *
+ * Returns the initial length without a pattern in @filename.
+ */
+static int tomoyo_const_part_length(const char *filename)
+{
+	char c;
+	int len = 0;
+
+	if (!filename)
+		return 0;
+	while ((c = *filename++) != '\0') {
+		if (c != '\\') {
+			len++;
+			continue;
+		}
+		c = *filename++;
+		switch (c) {
+		case '\\':  /* "\\" */
+			len += 2;
+			continue;
+		case '0':   /* "\ooo" */
+		case '1':
+		case '2':
+		case '3':
+			c = *filename++;
+			if (c < '0' || c > '7')
+				break;
+			c = *filename++;
+			if (c < '0' || c > '7')
+				break;
+			len += 4;
+			continue;
+		}
+		break;
+	}
+	return len;
+}
+
+/**
+ * tomoyo_fill_path_info - Fill in "struct tomoyo_path_info" members.
+ *
+ * @ptr: Pointer to "struct tomoyo_path_info" to fill in.
+ *
+ * The caller sets "struct tomoyo_path_info"->name.
+ */
+void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
+{
+	const char *name = ptr->name;
+	const int len = strlen(name);
+
+	ptr->const_len = tomoyo_const_part_length(name);
+	ptr->is_dir = len && (name[len - 1] == '/');
+	ptr->is_patterned = (ptr->const_len < len);
+	ptr->hash = full_name_hash(name, len);
+}
+
+/**
+ * tomoyo_file_matches_pattern2 - Pattern matching without '/' character and "\-" pattern.
+ *
+ * @filename:     The start of string to check.
+ * @filename_end: The end of string to check.
+ * @pattern:      The start of pattern to compare.
+ * @pattern_end:  The end of pattern to compare.
+ *
+ * Returns true if @filename matches @pattern, false otherwise.
+ */
+static bool tomoyo_file_matches_pattern2(const char *filename,
+					 const char *filename_end,
+					 const char *pattern,
+					 const char *pattern_end)
+{
+	while (filename < filename_end && pattern < pattern_end) {
+		char c;
+		if (*pattern != '\\') {
+			if (*filename++ != *pattern++)
+				return false;
+			continue;
+		}
+		c = *filename;
+		pattern++;
+		switch (*pattern) {
+			int i;
+			int j;
+		case '?':
+			if (c == '/') {
+				return false;
+			} else if (c == '\\') {
+				if (filename[1] == '\\')
+					filename++;
+				else if (tomoyo_byte_range(filename + 1))
+					filename += 3;
+				else
+					return false;
+			}
+			break;
+		case '\\':
+			if (c != '\\')
+				return false;
+			if (*++filename != '\\')
+				return false;
+			break;
+		case '+':
+			if (!isdigit(c))
+				return false;
+			break;
+		case 'x':
+			if (!isxdigit(c))
+				return false;
+			break;
+		case 'a':
+			if (!tomoyo_alphabet_char(c))
+				return false;
+			break;
+		case '0':
+		case '1':
+		case '2':
+		case '3':
+			if (c == '\\' && tomoyo_byte_range(filename + 1)
+			    && strncmp(filename + 1, pattern, 3) == 0) {
+				filename += 3;
+				pattern += 2;
+				break;
+			}
+			return false; /* Not matched. */
+		case '*':
+		case '@':
+			for (i = 0; i <= filename_end - filename; i++) {
+				if (tomoyo_file_matches_pattern2(
+						    filename + i, filename_end,
+						    pattern + 1, pattern_end))
+					return true;
+				c = filename[i];
+				if (c == '.' && *pattern == '@')
+					break;
+				if (c != '\\')
+					continue;
+				if (filename[i + 1] == '\\')
+					i++;
+				else if (tomoyo_byte_range(filename + i + 1))
+					i += 3;
+				else
+					break; /* Bad pattern. */
+			}
+			return false; /* Not matched. */
+		default:
+			j = 0;
+			c = *pattern;
+			if (c == '$') {
+				while (isdigit(filename[j]))
+					j++;
+			} else if (c == 'X') {
+				while (isxdigit(filename[j]))
+					j++;
+			} else if (c == 'A') {
+				while (tomoyo_alphabet_char(filename[j]))
+					j++;
+			}
+			for (i = 1; i <= j; i++) {
+				if (tomoyo_file_matches_pattern2(
+						    filename + i, filename_end,
+						    pattern + 1, pattern_end))
+					return true;
+			}
+			return false; /* Not matched or bad pattern. */
+		}
+		filename++;
+		pattern++;
+	}
+	while (*pattern == '\\' &&
+	       (*(pattern + 1) == '*' || *(pattern + 1) == '@'))
+		pattern += 2;
+	return filename == filename_end && pattern == pattern_end;
+}
+
+/**
+ * tomoyo_file_matches_pattern - Pattern matching without '/' character.
+ *
+ * @filename:     The start of string to check.
+ * @filename_end: The end of string to check.
+ * @pattern:      The start of pattern to compare.
+ * @pattern_end:  The end of pattern to compare.
+ *
+ * Returns true if @filename matches @pattern, false otherwise.
+ */
+static bool tomoyo_file_matches_pattern(const char *filename,
+					const char *filename_end,
+					const char *pattern,
+					const char *pattern_end)
+{
+	const char *pattern_start = pattern;
+	bool first = true;
+	bool result;
+
+	while (pattern < pattern_end - 1) {
+		/* Split at "\-" pattern. */
+		if (*pattern++ != '\\' || *pattern++ != '-')
+			continue;
+		result = tomoyo_file_matches_pattern2(filename,
+						      filename_end,
+						      pattern_start,
+						      pattern - 2);
+		if (first)
+			result = !result;
+		if (result)
+			return false;
+		first = false;
+		pattern_start = pattern;
+	}
+	result = tomoyo_file_matches_pattern2(filename, filename_end,
+					      pattern_start, pattern_end);
+	return first ? result : !result;
+}
+
+/**
+ * tomoyo_path_matches_pattern2 - Do pathname pattern matching.
+ *
+ * @f: The start of string to check.
+ * @p: The start of pattern to compare.
+ *
+ * Returns true if @f matches @p, false otherwise.
+ */
+static bool tomoyo_path_matches_pattern2(const char *f, const char *p)
+{
+	const char *f_delimiter;
+	const char *p_delimiter;
+
+	while (*f && *p) {
+		f_delimiter = strchr(f, '/');
+		if (!f_delimiter)
+			f_delimiter = f + strlen(f);
+		p_delimiter = strchr(p, '/');
+		if (!p_delimiter)
+			p_delimiter = p + strlen(p);
+		if (*p == '\\' && *(p + 1) == '{')
+			goto recursive;
+		if (!tomoyo_file_matches_pattern(f, f_delimiter, p,
+						 p_delimiter))
+			return false;
+		f = f_delimiter;
+		if (*f)
+			f++;
+		p = p_delimiter;
+		if (*p)
+			p++;
+	}
+	/* Ignore trailing "\*" and "\@" in @pattern. */
+	while (*p == '\\' &&
+	       (*(p + 1) == '*' || *(p + 1) == '@'))
+		p += 2;
+	return !*f && !*p;
+ recursive:
+	/*
+	 * The "\{" pattern is permitted only after '/' character.
+	 * This guarantees that below "*(p - 1)" is safe.
+	 * Also, the "\}" pattern is permitted only before '/' character
+	 * so that "\{" + "\}" pair will not break the "\-" operator.
+	 */
+	if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' ||
+	    *(p_delimiter - 1) != '}' || *(p_delimiter - 2) != '\\')
+		return false; /* Bad pattern. */
+	do {
+		/* Compare current component with pattern. */
+		if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2,
+						 p_delimiter - 2))
+			break;
+		/* Proceed to next component. */
+		f = f_delimiter;
+		if (!*f)
+			break;
+		f++;
+		/* Continue comparison. */
+		if (tomoyo_path_matches_pattern2(f, p_delimiter + 1))
+			return true;
+		f_delimiter = strchr(f, '/');
+	} while (f_delimiter);
+	return false; /* Not matched. */
+}
+
+/**
+ * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern.
+ *
+ * @filename: The filename to check.
+ * @pattern:  The pattern to compare.
+ *
+ * Returns true if matches, false otherwise.
+ *
+ * The following patterns are available.
+ *   \\     \ itself.
+ *   \ooo   Octal representation of a byte.
+ *   \*     Zero or more repetitions of characters other than '/'.
+ *   \@     Zero or more repetitions of characters other than '/' or '.'.
+ *   \?     A single byte other than '/'.
+ *   \$     One or more repetitions of decimal digits.
+ *   \+     A single decimal digit.
+ *   \X     One or more repetitions of hexadecimal digits.
+ *   \x     A single hexadecimal digit.
+ *   \A     One or more repetitions of alphabetic characters.
+ *   \a     A single alphabetic character.
+ *
+ *   \-     Subtraction operator.
+ *
+ *   /\{dir\}/   '/' + 'One or more repetitions of dir/' (e.g. /dir/ /dir/dir/
+ *               /dir/dir/dir/ ).
+ */
+bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
+				 const struct tomoyo_path_info *pattern)
+{
+	const char *f = filename->name;
+	const char *p = pattern->name;
+	const int len = pattern->const_len;
+
+	/* If @pattern contains no pattern characters, we can use strcmp(). */
+	if (!pattern->is_patterned)
+		return !tomoyo_pathcmp(filename, pattern);
+	/* Don't compare directory and non-directory. */
+	if (filename->is_dir != pattern->is_dir)
+		return false;
+	/* Compare the initial length without patterns. */
+	if (strncmp(f, p, len))
+		return false;
+	f += len;
+	p += len;
+	return tomoyo_path_matches_pattern2(f, p);
+}
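+
+/*
+ * Illustrative example (pathnames chosen by the editor): "/etc/\*.conf"
+ * matches "/etc/resolv.conf" but not "/etc/init.d/rc.conf", because
+ * "\*" never crosses a '/', while "/tmp/\{dir\}/file" matches
+ * "/tmp/dir/file", "/tmp/dir/dir/file" and so on.
+ */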
+
+/**
+ * tomoyo_get_exe - Get tomoyo_realpath() of current process.
+ *
+ * Returns the tomoyo_realpath() of current process on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so the caller must call kfree()
+ * if this function didn't return NULL.
+ */
+const char *tomoyo_get_exe(void)
+{
+	struct file *exe_file;
+	const char *cp;
+	struct mm_struct *mm = current->mm;
+
+	if (!mm)
+		return NULL;
+	exe_file = get_mm_exe_file(mm);
+	if (!exe_file)
+		return NULL;
+
+	cp = tomoyo_realpath_from_path(&exe_file->f_path);
+	fput(exe_file);
+	return cp;
+}
+
+/**
+ * tomoyo_get_mode - Get MAC mode.
+ *
+ * @ns:      Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number.
+ * @index:   Index number of functionality.
+ *
+ * Returns mode.
+ */
+int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
+		    const u8 index)
+{
+	u8 mode;
+	struct tomoyo_profile *p;
+
+	if (!tomoyo_policy_loaded)
+		return TOMOYO_CONFIG_DISABLED;
+	p = tomoyo_profile(ns, profile);
+	mode = p->config[index];
+	if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+		mode = p->config[tomoyo_index2category[index]
+				 + TOMOYO_MAX_MAC_INDEX];
+	if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+		mode = p->default_config;
+	return mode & 3;
+}
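+
+/*
+ * Illustrative example: if a profile leaves CONFIG::file::open at
+ * "use_default", the category entry (CONFIG::file) is consulted next,
+ * and finally the profile-wide default; the low two bits of the result
+ * select disabled, learning, permissive or enforcing mode.
+ */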
+
+/**
+ * tomoyo_init_request_info - Initialize "struct tomoyo_request_info" members.
+ *
+ * @r:      Pointer to "struct tomoyo_request_info" to initialize.
+ * @domain: Pointer to "struct tomoyo_domain_info". NULL for tomoyo_domain().
+ * @index:  Index number of functionality.
+ *
+ * Returns mode.
+ */
+int tomoyo_init_request_info(struct tomoyo_request_info *r,
+			     struct tomoyo_domain_info *domain, const u8 index)
+{
+	u8 profile;
+	memset(r, 0, sizeof(*r));
+	if (!domain)
+		domain = tomoyo_domain();
+	r->domain = domain;
+	profile = domain->profile;
+	r->profile = profile;
+	r->type = index;
+	r->mode = tomoyo_get_mode(domain->ns, profile, index);
+	return r->mode;
+}
+
+/**
+ * tomoyo_domain_quota_is_ok - Check for domain's quota.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns true if the domain has not exceeded its quota, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
+{
+	unsigned int count = 0;
+	struct tomoyo_domain_info *domain = r->domain;
+	struct tomoyo_acl_info *ptr;
+
+	if (r->mode != TOMOYO_CONFIG_LEARNING)
+		return false;
+	if (!domain)
+		return true;
+	list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+		u16 perm;
+		u8 i;
+		if (ptr->is_deleted)
+			continue;
+		switch (ptr->type) {
+		case TOMOYO_TYPE_PATH_ACL:
+			perm = container_of(ptr, struct tomoyo_path_acl, head)
+				->perm;
+			break;
+		case TOMOYO_TYPE_PATH2_ACL:
+			perm = container_of(ptr, struct tomoyo_path2_acl, head)
+				->perm;
+			break;
+		case TOMOYO_TYPE_PATH_NUMBER_ACL:
+			perm = container_of(ptr, struct tomoyo_path_number_acl,
+					    head)->perm;
+			break;
+		case TOMOYO_TYPE_MKDEV_ACL:
+			perm = container_of(ptr, struct tomoyo_mkdev_acl,
+					    head)->perm;
+			break;
+		case TOMOYO_TYPE_INET_ACL:
+			perm = container_of(ptr, struct tomoyo_inet_acl,
+					    head)->perm;
+			break;
+		case TOMOYO_TYPE_UNIX_ACL:
+			perm = container_of(ptr, struct tomoyo_unix_acl,
+					    head)->perm;
+			break;
+		case TOMOYO_TYPE_MANUAL_TASK_ACL:
+			perm = 0;
+			break;
+		default:
+			perm = 1;
+		}
+		for (i = 0; i < 16; i++)
+			if (perm & (1 << i))
+				count++;
+	}
+	if (count < tomoyo_profile(domain->ns, domain->profile)->
+	    pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
+		return true;
+	if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
+		domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
+		/* r->granted = false; */
+		tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
+		printk(KERN_WARNING "WARNING: "
+		       "Domain '%s' has too many ACLs to hold. "
+		       "Stopped learning mode.\n", domain->domainname->name);
+	}
+	return false;
+}
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
new file mode 100644
index 0000000..90c605e
--- /dev/null
+++ b/security/yama/Kconfig
@@ -0,0 +1,12 @@
+config SECURITY_YAMA
+	bool "Yama support"
+	depends on SECURITY
+	default n
+	help
+	  This selects Yama, which extends DAC support with additional
+	  system-wide security settings beyond regular Linux discretionary
+	  access controls. Currently available is ptrace scope restriction.
+	  Like capabilities, this security module stacks with other LSMs.
+	  Further information can be found in Documentation/security/Yama.txt.
+
+	  If you are unsure how to answer this question, answer N.
diff --git a/security/yama/Makefile b/security/yama/Makefile
new file mode 100644
index 0000000..8b5e065
--- /dev/null
+++ b/security/yama/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SECURITY_YAMA) := yama.o
+
+yama-y := yama_lsm.o
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
new file mode 100644
index 0000000..cb6ed10
--- /dev/null
+++ b/security/yama/yama_lsm.c
@@ -0,0 +1,408 @@
+/*
+ * Yama Linux Security Module
+ *
+ * Author: Kees Cook <keescook@chromium.org>
+ *
+ * Copyright (C) 2010 Canonical, Ltd.
+ * Copyright (C) 2011 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/lsm_hooks.h>
+#include <linux/sysctl.h>
+#include <linux/ptrace.h>
+#include <linux/prctl.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+
+#define YAMA_SCOPE_DISABLED	0
+#define YAMA_SCOPE_RELATIONAL	1
+#define YAMA_SCOPE_CAPABILITY	2
+#define YAMA_SCOPE_NO_ATTACH	3
+
+static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
+
+/* describe a ptrace relationship for potential exception */
+struct ptrace_relation {
+	struct task_struct *tracer;
+	struct task_struct *tracee;
+	bool invalid;
+	struct list_head node;
+	struct rcu_head rcu;
+};
+
+static LIST_HEAD(ptracer_relations);
+static DEFINE_SPINLOCK(ptracer_relations_lock);
+
+static void yama_relation_cleanup(struct work_struct *work);
+static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
+
+/**
+ * yama_relation_cleanup - remove invalid entries from the relation list
+ * @work: unused, required by the workqueue API
+ */
+static void yama_relation_cleanup(struct work_struct *work)
+{
+	struct ptrace_relation *relation;
+
+	spin_lock(&ptracer_relations_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid) {
+			list_del_rcu(&relation->node);
+			kfree_rcu(relation, rcu);
+		}
+	}
+	rcu_read_unlock();
+	spin_unlock(&ptracer_relations_lock);
+}
+
+/**
+ * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
+ * @tracer: the task_struct of the process doing the ptrace
+ * @tracee: the task_struct of the process to be ptraced
+ *
+ * Each tracee can have, at most, one tracer registered. Each time this
+ * is called, the prior registered tracer will be replaced for the tracee.
+ *
+ * Returns 0 if the relationship was added, -ve on error.
+ */
+static int yama_ptracer_add(struct task_struct *tracer,
+			    struct task_struct *tracee)
+{
+	struct ptrace_relation *relation, *added;
+
+	added = kmalloc(sizeof(*added), GFP_KERNEL);
+	if (!added)
+		return -ENOMEM;
+
+	added->tracee = tracee;
+	added->tracer = tracer;
+	added->invalid = false;
+
+	spin_lock(&ptracer_relations_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
+		if (relation->tracee == tracee) {
+			list_replace_rcu(&relation->node, &added->node);
+			kfree_rcu(relation, rcu);
+			goto out;
+		}
+	}
+
+	list_add_rcu(&added->node, &ptracer_relations);
+
+out:
+	rcu_read_unlock();
+	spin_unlock(&ptracer_relations_lock);
+	return 0;
+}
+
+/**
+ * yama_ptracer_del - remove exceptions related to the given tasks
+ * @tracer: remove any relation where tracer task matches
+ * @tracee: remove any relation where tracee task matches
+ */
+static void yama_ptracer_del(struct task_struct *tracer,
+			     struct task_struct *tracee)
+{
+	struct ptrace_relation *relation;
+	bool marked = false;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
+		if (relation->tracee == tracee ||
+		    (tracer && relation->tracer == tracer)) {
+			relation->invalid = true;
+			marked = true;
+		}
+	}
+	rcu_read_unlock();
+
+	if (marked)
+		schedule_work(&yama_relation_work);
+}
+
+/**
+ * yama_task_free - remove any ptracer relations involving the freed task
+ * @task: task being removed
+ */
+void yama_task_free(struct task_struct *task)
+{
+	yama_ptracer_del(task, task);
+}
+
+/**
+ * yama_task_prctl - check for Yama-specific prctl operations
+ * @option: operation
+ * @arg2: argument
+ * @arg3: argument
+ * @arg4: argument
+ * @arg5: argument
+ *
+ * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
+ * does not handle the given option.
+ */
+int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+			   unsigned long arg4, unsigned long arg5)
+{
+	int rc = -ENOSYS;
+	struct task_struct *myself = current;
+
+	switch (option) {
+	case PR_SET_PTRACER:
+		/* Since a thread can call prctl(), find the group leader
+		 * before calling _add() or _del() on it, since we want
+		 * process-level granularity of control. The tracer group
+		 * leader checking is handled later when walking the ancestry
+		 * at the time of PTRACE_ATTACH check.
+		 */
+		rcu_read_lock();
+		if (!thread_group_leader(myself))
+			myself = rcu_dereference(myself->group_leader);
+		get_task_struct(myself);
+		rcu_read_unlock();
+
+		if (arg2 == 0) {
+			yama_ptracer_del(NULL, myself);
+			rc = 0;
+		} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
+			rc = yama_ptracer_add(NULL, myself);
+		} else {
+			struct task_struct *tracer;
+
+			rcu_read_lock();
+			tracer = find_task_by_vpid(arg2);
+			if (tracer)
+				get_task_struct(tracer);
+			else
+				rc = -EINVAL;
+			rcu_read_unlock();
+
+			if (tracer) {
+				rc = yama_ptracer_add(tracer, myself);
+				put_task_struct(tracer);
+			}
+		}
+
+		put_task_struct(myself);
+		break;
+	}
+
+	return rc;
+}
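+
+/*
+ * Illustrative example (userspace, with a hypothetical debugger_pid):
+ * a process running under YAMA_SCOPE_RELATIONAL can allow a specific
+ * helper to attach with
+ *
+ *   prctl(PR_SET_PTRACER, debugger_pid, 0, 0, 0);
+ *
+ * and clear the exception again with
+ * prctl(PR_SET_PTRACER, 0, 0, 0, 0).
+ */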
+
+/**
+ * task_is_descendant - walk up a process family tree looking for a match
+ * @parent: the process to compare against while walking up from child
+ * @child: the process to start from while looking upwards for parent
+ *
+ * Returns 1 if child is a descendant of parent, 0 if not.
+ */
+static int task_is_descendant(struct task_struct *parent,
+			      struct task_struct *child)
+{
+	int rc = 0;
+	struct task_struct *walker = child;
+
+	if (!parent || !child)
+		return 0;
+
+	rcu_read_lock();
+	if (!thread_group_leader(parent))
+		parent = rcu_dereference(parent->group_leader);
+	while (walker->pid > 0) {
+		if (!thread_group_leader(walker))
+			walker = rcu_dereference(walker->group_leader);
+		if (walker == parent) {
+			rc = 1;
+			break;
+		}
+		walker = rcu_dereference(walker->real_parent);
+	}
+	rcu_read_unlock();
+
+	return rc;
+}
+
+/**
+ * ptracer_exception_found - tracer registered as exception for this tracee
+ * @tracer: the task_struct of the process attempting ptrace
+ * @tracee: the task_struct of the process to be ptraced
+ *
+ * Returns 1 if @tracer is the registered ptracer exception for @tracee
+ * or a descendant of it, 0 otherwise.
+ */
+static int ptracer_exception_found(struct task_struct *tracer,
+				   struct task_struct *tracee)
+{
+	int rc = 0;
+	struct ptrace_relation *relation;
+	struct task_struct *parent = NULL;
+	bool found = false;
+
+	rcu_read_lock();
+	if (!thread_group_leader(tracee))
+		tracee = rcu_dereference(tracee->group_leader);
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
+		if (relation->tracee == tracee) {
+			parent = relation->tracer;
+			found = true;
+			break;
+		}
+	}
+
+	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
+		rc = 1;
+	rcu_read_unlock();
+
+	return rc;
+}
+
+/**
+ * yama_ptrace_access_check - validate PTRACE_ATTACH calls
+ * @child: task that current task is attempting to ptrace
+ * @mode: ptrace attach mode
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+static int yama_ptrace_access_check(struct task_struct *child,
+				    unsigned int mode)
+{
+	int rc = 0;
+
+	/* require ptrace target be a child of ptracer on attach */
+	if (mode & PTRACE_MODE_ATTACH) {
+		switch (ptrace_scope) {
+		case YAMA_SCOPE_DISABLED:
+			/* No additional restrictions. */
+			break;
+		case YAMA_SCOPE_RELATIONAL:
+			rcu_read_lock();
+			if (!task_is_descendant(current, child) &&
+			    !ptracer_exception_found(current, child) &&
+			    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+				rc = -EPERM;
+			rcu_read_unlock();
+			break;
+		case YAMA_SCOPE_CAPABILITY:
+			rcu_read_lock();
+			if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+				rc = -EPERM;
+			rcu_read_unlock();
+			break;
+		case YAMA_SCOPE_NO_ATTACH:
+		default:
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
+		printk_ratelimited(KERN_NOTICE
+			"ptrace of pid %d was attempted by: %s (pid %d)\n",
+			child->pid, current->comm, current->pid);
+	}
+
+	return rc;
+}
+
+/**
+ * yama_ptrace_traceme - validate PTRACE_TRACEME calls
+ * @parent: task that will become the ptracer of the current task
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+int yama_ptrace_traceme(struct task_struct *parent)
+{
+	int rc = 0;
+
+	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
+	switch (ptrace_scope) {
+	case YAMA_SCOPE_CAPABILITY:
+		if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
+			rc = -EPERM;
+		break;
+	case YAMA_SCOPE_NO_ATTACH:
+		rc = -EPERM;
+		break;
+	}
+
+	if (rc) {
+		printk_ratelimited(KERN_NOTICE
+			"ptraceme of pid %d was attempted by: %s (pid %d)\n",
+			current->pid, parent->comm, parent->pid);
+	}
+
+	return rc;
+}
+
+static struct security_hook_list yama_hooks[] = {
+	LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check),
+	LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme),
+	LSM_HOOK_INIT(task_prctl, yama_task_prctl),
+	LSM_HOOK_INIT(task_free, yama_task_free),
+};
+
+#ifdef CONFIG_SYSCTL
+static int yama_dointvec_minmax(struct ctl_table *table, int write,
+				void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table table_copy;
+
+	if (write && !capable(CAP_SYS_PTRACE))
+		return -EPERM;
+
+	/* Lock the max value if it ever gets set. */
+	table_copy = *table;
+	if (*(int *)table_copy.data == *(int *)table_copy.extra2)
+		table_copy.extra1 = table_copy.extra2;
+
+	return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
+}
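+
+/*
+ * Illustrative example: the scope is adjusted at runtime with
+ * "sysctl kernel.yama.ptrace_scope=1".  Once the value has been raised
+ * to YAMA_SCOPE_NO_ATTACH (3), the handler above pins extra1 to extra2,
+ * so the setting can no longer be lowered.
+ */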
+
+static int zero;
+static int max_scope = YAMA_SCOPE_NO_ATTACH;
+
+struct ctl_path yama_sysctl_path[] = {
+	{ .procname = "kernel", },
+	{ .procname = "yama", },
+	{ }
+};
+
+static struct ctl_table yama_sysctl_table[] = {
+	{
+		.procname       = "ptrace_scope",
+		.data           = &ptrace_scope,
+		.maxlen         = sizeof(int),
+		.mode           = 0644,
+		.proc_handler   = yama_dointvec_minmax,
+		.extra1         = &zero,
+		.extra2         = &max_scope,
+	},
+	{ }
+};
+static void __init yama_init_sysctl(void)
+{
+	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
+		panic("Yama: sysctl registration failed.\n");
+}
+#else
+static inline void yama_init_sysctl(void) { }
+#endif /* CONFIG_SYSCTL */
+
+void __init yama_add_hooks(void)
+{
+	pr_info("Yama: becoming mindful.\n");
+	security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks));
+	yama_init_sysctl();
+}