Initial VNFTEST framework
Issue-ID: VNFSDK-195

Change-Id: I5abf0dd033e76e5225bb8271c0afaea325d741d9
Signed-off-by: Moshe <moshehoa@amdocs.com>

docker build

Issue-ID: VNFSDK-195

Change-Id: I25eb933504c0201e6c26477b540626fd515d2887

Signed-off-by: Moshe <moshehoa@amdocs.com>

fix requirements

Issue-ID: VNFSDK-195
Change-Id: I5907fa102bfbf9cb81d42e491c133b4fdbb0d6fd
Signed-off-by: Moshe <moshehoa@amdocs.com>

rm netifaces

Issue-ID: VNFSDK-195
Change-Id: I349d0c738442edfef256c90b06cbaeb446c1db13
Signed-off-by: Moshe <moshehoa@amdocs.com>

fix tox config

Issue-ID: VNFSDK-195

Change-Id: I5c0b0e0ab96cad1bdc56ab63860d794bfd15b5eb
Signed-off-by: Moshe <moshehoa@amdocs.com>

Add unit test

Issue-ID: VNFSDK-195
Change-Id: I08c9ba53721306aff4b74720181f8c853c4ccabe
Signed-off-by: Moshe <moshehoa@amdocs.com>

fix setup.py

Issue-ID: VNFSDK-195
Change-Id: I72bd93e4977edf5ef0b46c72fe47165b805aab7b
Signed-off-by: Moshe <moshehoa@amdocs.com>

fix test execution

Issue-ID: VNFSDK-195
Change-Id: I488a6226d2562229f0e7fa6c1d20f0c43882bc3b
Signed-off-by: Moshe <moshehoa@amdocs.com>
diff --git a/dist/vnftest-0.1.dev0-py2.7.egg b/dist/vnftest-0.1.dev0-py2.7.egg
new file mode 100644
index 0000000..f237cd8
--- /dev/null
+++ b/dist/vnftest-0.1.dev0-py2.7.egg
Binary files differ
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..20598c7
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,59 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/docker/Dockerfile
+FROM ubuntu:16.04
+
+LABEL image=onap/vnftest
+
+ARG BRANCH=master
+
+# GIT repo directory
+ENV REPOS_DIR="/home/onap/repos" \
+    IMAGE_DIR="/home/onap/images/"
+
+# Set work directory
+
+# Vnftest repo
+ENV VNFTEST_REPO_DIR="${REPOS_DIR}/vnftest" \
+    RELENG_REPO_DIR="${REPOS_DIR}/releng" \
+    STORPERF_REPO_DIR="${REPOS_DIR}/storperf"
+
+RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean
+RUN easy_install -U setuptools==30.0.0
+RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0
+
+RUN mkdir -p ${REPOS_DIR}
+
+RUN git config --global http.sslVerify false
+RUN git clone --depth 1 -b $BRANCH https://gerrit.onap.org/r/vnfsdk/dovetail-integration ${VNFTEST_REPO_DIR}
+
+WORKDIR ${VNFTEST_REPO_DIR}
+RUN ${VNFTEST_REPO_DIR}/install.sh
+RUN ${VNFTEST_REPO_DIR}/docker/supervisor.sh
+
+RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+
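+# port served by nginx, which fronts the vnftest API and GUI (see docker/nginx.sh)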
+EXPOSE 6000
+
+ADD http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img ${IMAGE_DIR}
+ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img ${IMAGE_DIR}
+
+COPY ./exec_tests.sh /usr/local/bin/
+
+ENV NSB_DIR="/opt/nsb_bin" \
+    PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl"
+
+WORKDIR ${REPOS_DIR}
+CMD ["/usr/bin/supervisord"]
diff --git a/docker/Makefile b/docker/Makefile
new file mode 100644
index 0000000..4f9e18b
--- /dev/null
+++ b/docker/Makefile
@@ -0,0 +1,43 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/docker/Makefile
+
+SHELL = /bin/bash
+
+IMAGE = onap/vnftest
+
+DOCKER_TAG = beijing.0
+
+.PHONY: all
+all: .docker
+
+.docker:
+	/usr/bin/docker build --rm=true --no-cache=true -t $(IMAGE):$(DOCKER_TAG) .
+	/usr/bin/docker tag $(IMAGE):$(DOCKER_TAG) $(IMAGE):latest
+	touch .docker
+
+
+.PHONY:	clean
+clean:
+	@docker ps | grep $(IMAGE) | awk '{print $$1}' | xargs -r docker stop
+	@docker ps -a | grep $(IMAGE) | awk '{print $$1}' | xargs -r docker rm
+	@echo "Docker images to remove:"
+	@docker images | head -1 && docker images | grep $(IMAGE) || true
+	@image_tags=($$(docker images | grep $(IMAGE) | awk '{print $$2}')) ; \
+	for tag in "$${image_tags[@]}"; do \
+	   echo "Removing docker image $(IMAGE):$$tag..." ; \
+	   docker rmi $(IMAGE):$$tag ; \
+	done
+	rm -f .docker
diff --git a/docker/exec_tests.sh b/docker/exec_tests.sh
new file mode 100755
index 0000000..9e16476
--- /dev/null
+++ b/docker/exec_tests.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/docker/exec_tests.sh
+set -e
+
+: ${VNFTEST_REPO:='https://gerrit.onap.org/gerrit/vnftest'}
+: ${VNFTEST_REPO_DIR:='/home/onap/repos/vnftest'}
+: ${VNFTEST_BRANCH:='master'} # branch, tag, sha1 or refspec
+
+# git update using reference as a branch.
+# git_update_branch ref
+function git_update_branch {
+    local git_branch=$1
+
+    git checkout -f origin/${git_branch}
+    # a local branch might not exist
+    git branch -D ${git_branch} || true
+    git checkout -b ${git_branch}
+}
+
+# git update using reference as a branch.
+# git_update_remote_branch ref
+function git_update_remote_branch {
+    local git_branch=$1
+
+    git checkout -b ${git_branch} -t origin/${git_branch}
+}
+
+# git update using reference as a tag. Be careful editing source at that repo
+# as working copy will be in a detached mode
+# git_update_tag ref
+function git_update_tag {
+    local git_tag=$1
+
+    git tag -d ${git_tag}
+    # fetching given tag only
+    git fetch origin tag ${git_tag}
+    git checkout -f ${git_tag}
+}
+
+
+# OpenStack Functions
+
+git_checkout()
+{
+    local git_ref=$1
+    if [[ -n "$(git show-ref refs/tags/${git_ref})" ]]; then
+        git_update_tag "${git_ref}"
+    elif [[ -n "$(git show-ref refs/heads/${git_ref})" ]]; then
+        git_update_branch "${git_ref}"
+    elif [[ -n "$(git show-ref refs/remotes/origin/${git_ref})" ]]; then
+        git_update_remote_branch "${git_ref}"
+    # check to see if it is a remote ref
+    elif git fetch --tags origin "${git_ref}"; then
+        # refspec / changeset
+        git checkout FETCH_HEAD
+    else
+        # if we are a random commit id we have to unshallow
+        # to get all the commits
+        git fetch --unshallow origin
+        git checkout -f "${git_ref}"
+    fi
+}
+
+# releng is not needed, we bind-mount the credentials
+
+echo
+echo "INFO: Updating vnftest -> ${VNFTEST_BRANCH}"
+if [ ! -d ${VNFTEST_REPO_DIR} ]; then
+    git clone ${VNFTEST_REPO} ${VNFTEST_REPO_DIR}
+fi
+cd ${VNFTEST_REPO_DIR}
+git_checkout ${VNFTEST_BRANCH}
+
+if [[ "${DEPLOY_STEP:0:2}" == "os" ]];then
+    # setup the environment
+    source ${VNFTEST_REPO_DIR}/tests/ci/prepare_env.sh
+fi
+
+# execute tests
+${VNFTEST_REPO_DIR}/tests/ci/vnftest-verify $@
diff --git a/docker/nginx.sh b/docker/nginx.sh
new file mode 100755
index 0000000..ac81c73
--- /dev/null
+++ b/docker/nginx.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/docker/nginx.sh
+
+# nginx config
+nginx_config='/etc/nginx/conf.d/vnftest.conf'
+
+if [[ ! -e "${nginx_config}" ]];then
+
+    cat << EOF > "${nginx_config}"
+server {
+    listen 6000;
+    server_name localhost;
+    index  index.htm index.html;
+    location / {
+        include uwsgi_params;
+        client_max_body_size    2000m;
+        uwsgi_pass unix:///var/run/vnftest.sock;
+    }
+
+    location /gui/ {
+        alias /etc/nginx/vnftest/gui/;
+    }
+
+    location /report/ {
+        alias /tmp/;
+    }
+}
+EOF
+fi
diff --git a/docker/supervisor.sh b/docker/supervisor.sh
new file mode 100755
index 0000000..0462024
--- /dev/null
+++ b/docker/supervisor.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/docker/supervisor.sh
+
+# nginx service start when boot
+supervisor_config='/etc/supervisor/conf.d/vnftest.conf'
+
+if [[ ! -e "${supervisor_config}" ]];then
+    cat << EOF > "${supervisor_config}"
+[supervisord]
+nodaemon = true
+
+[program:nginx]
+command = service nginx restart
+
+[program:vnftest_uwsgi]
+directory = /etc/vnftest
+command = uwsgi -i vnftest.ini
+EOF
+fi
diff --git a/docker/uwsgi.sh b/docker/uwsgi.sh
new file mode 100755
index 0000000..da0833a
--- /dev/null
+++ b/docker/uwsgi.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/docker/uwsgi.sh
+
+: ${VNFTEST_REPO_DIR:='/home/onap/repos/vnftest'}
+
+# generate uwsgi config file
+mkdir -p /etc/vnftest
+
+# create api log directory
+mkdir -p /var/log/vnftest
+
+# create vnftest.sock for communicating
+touch /var/run/vnftest.sock
+
+uwsgi_config='/etc/vnftest/vnftest.ini'
+if [[ ! -e "${uwsgi_config}" ]];then
+
+    cat << EOF > "${uwsgi_config}"
+[uwsgi]
+master = true
+debug = true
+chdir = ${VNFTEST_REPO_DIR}/api
+module = server
+plugins = python
+processes = 10
+threads = 5
+async = true
+max-requests = 5000
+chmod-socket = 666
+callable = app_wrapper
+enable-threads = true
+close-on-exec = 1
+daemonize = /var/log/vnftest/uwsgi.log
+socket = /var/run/vnftest.sock
+EOF
+    if [[ "${VNFTEST_VENV}" ]];then
+        echo "virtualenv = ${VNFTEST_VENV}" >> "${uwsgi_config}"
+    fi
+fi
diff --git a/etc/__init__.py b/etc/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/etc/__init__.py
diff --git a/etc/vnftest/vnftest.conf b/etc/vnftest/vnftest.conf
new file mode 100644
index 0000000..8d26d21
--- /dev/null
+++ b/etc/vnftest/vnftest.conf
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+[DEFAULT]
+debug = False
+# set up multiple dispatchers, comma separated, e.g. file,http
+dispatcher = file
+
+[dispatcher_http]
+timeout = 5
+target = http://127.0.0.1:8000/results
+
+[dispatcher_file]
+#file_path = /tmp/vnftest.out
+max_bytes = 0
+backup_count = 0
\ No newline at end of file
diff --git a/etc/vnftest/vnftest.yaml b/etc/vnftest/vnftest.yaml
new file mode 100644
index 0000000..b7d42c9
--- /dev/null
+++ b/etc/vnftest/vnftest.yaml
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+dir:
+  config: "/a/b"
+#  conf: /etc/vnftest
+#  repos: /home/vnftest/repos/vnftest
+#  log: /tmp/vnftest
+
+file:
+  out: "a/b"
+#  output_file: /tmp/vnftest.out
+#  html_file: /tmp/vnftest.htm
+#  reporting_file: /tmp/report.html
+
+component:
+  aai_ip: 10.247.159.158
+  aai_port: 30202
+  aai_ssl_port: 30233
+  mso_ip: 10.247.159.144
+  sdc_ip: 10.247.159.182
+  sdc_port: 30205
+  sdc_catalog_port: 30206
+  sdc_designer_user: cs0008
+  sdc_tester_user: jm0007
+  sdc_governance_user: gv0001
+  sdc_operations_user: op0001
\ No newline at end of file
diff --git a/ez_setup.py b/ez_setup.py
new file mode 100644
index 0000000..60d6ffc
--- /dev/null
+++ b/ez_setup.py
@@ -0,0 +1,367 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/ez_setup.py
+"""Bootstrap setuptools installation
+
+To use setuptools in your package's setup.py, include this
+file in the same directory and add this to the top of your setup.py::
+
+    from ez_setup import use_setuptools
+    use_setuptools()
+
+To require a specific version of setuptools, set a download
+mirror, or use an alternate download directory, simply supply
+the appropriate options to ``use_setuptools()``.
+
+This file can also be run as a script to install or upgrade setuptools.
+"""
+from __future__ import absolute_import
+import os
+import shutil
+import sys
+import tempfile
+import zipfile
+import optparse
+import subprocess
+import platform
+import contextlib
+
+from distutils import log
+
+try:
+    from urllib.request import urlopen
+except ImportError:
+    from six.moves.urllib.request import urlopen
+
+try:
+    from site import USER_SITE
+except ImportError:
+    USER_SITE = None
+
+DEFAULT_VERSION = "6.1"
+DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
+
+
+def _python_cmd(*args):
+    """
+    Return True if the command succeeded.
+    """
+    args = (sys.executable,) + args
+    return subprocess.call(args) == 0
+
+
+def _install(archive_filename, install_args=()):
+    with archive_context(archive_filename):
+        # installing
+        log.warn('Installing Setuptools')
+        if not _python_cmd('setup.py', 'install', *install_args):
+            log.warn('Something went wrong during the installation.')
+            log.warn('See the error message above.')
+            # exitcode will be 2
+            return 2
+
+
+def _build_egg(egg, archive_filename, to_dir):
+    with archive_context(archive_filename):
+        # building an egg
+        log.warn('Building a Setuptools egg in %s', to_dir)
+        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+    # returning the result
+    log.warn(egg)
+    if not os.path.exists(egg):
+        raise IOError('Could not build the egg.')
+
+
+class ContextualZipFile(zipfile.ZipFile):
+    """
+    Supplement ZipFile class to support context manager for Python 2.6
+    """
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def __new__(cls, *args, **kwargs):
+        """
+        Construct a ZipFile or ContextualZipFile as appropriate
+        """
+        if hasattr(zipfile.ZipFile, '__exit__'):
+            return zipfile.ZipFile(*args, **kwargs)
+        return super(ContextualZipFile, cls).__new__(cls)
+
+
+@contextlib.contextmanager
+def archive_context(filename):
+    # extracting the archive
+    tmpdir = tempfile.mkdtemp()
+    log.warn('Extracting in %s', tmpdir)
+    old_wd = os.getcwd()
+    try:
+        os.chdir(tmpdir)
+        with ContextualZipFile(filename) as archive:
+            archive.extractall()
+
+        # going in the directory
+        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+        os.chdir(subdir)
+        log.warn('Now working in %s', subdir)
+        yield
+
+    finally:
+        os.chdir(old_wd)
+        shutil.rmtree(tmpdir)
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
+                       % (version, sys.version_info[0], sys.version_info[1]))
+    if not os.path.exists(egg):
+        archive = download_setuptools(version, download_base,
+                                      to_dir, download_delay)
+        _build_egg(egg, archive, to_dir)
+    sys.path.insert(0, egg)
+
+    # Remove previously-imported pkg_resources if present (see
+    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
+    if 'pkg_resources' in sys.modules:
+        del sys.modules['pkg_resources']
+
+    import setuptools
+    setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+                   to_dir=os.curdir, download_delay=15):
+    to_dir = os.path.abspath(to_dir)
+    rep_modules = 'pkg_resources', 'setuptools'
+    imported = set(sys.modules).intersection(rep_modules)
+    try:
+        import pkg_resources
+    except ImportError:
+        return _do_download(version, download_base, to_dir, download_delay)
+    try:
+        pkg_resources.require("setuptools>=" + version)
+        return
+    except pkg_resources.DistributionNotFound:
+        return _do_download(version, download_base, to_dir, download_delay)
+    except pkg_resources.VersionConflict as VC_err:
+        if imported:
+            msg = """\
+The required version of setuptools (>={version}) is not available,
+and can't be installed while this script is running. Please
+install a more recent version first, using
+'easy_install -U setuptools'.
+
+(Currently using {VC_err.args[0]!r})
+""".format(VC_err=VC_err, version=version)
+            sys.stderr.write(msg)
+            sys.exit(2)
+
+        # otherwise, reload ok
+        del pkg_resources, sys.modules['pkg_resources']
+        return _do_download(version, download_base, to_dir, download_delay)
+
+
+def _clean_check(cmd, target):
+    """
+    Run the command to download target. If the command fails, clean up before
+    re-raising the error.
+    """
+    try:
+        subprocess.check_call(cmd)
+    except subprocess.CalledProcessError:
+        if os.access(target, os.F_OK):
+            os.unlink(target)
+        raise
+
+
+def download_file_powershell(url, target):
+    """
+    Download the file at url to target using Powershell (which will validate
+    trust). Raise an exception if the command cannot complete.
+    """
+    target = os.path.abspath(target)
+    ps_cmd = (
+        "[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
+        "[System.Net.CredentialCache]::DefaultCredentials; "
+        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
+        % vars()
+    )
+    cmd = [
+        'powershell',
+        '-Command',
+        ps_cmd,
+    ]
+    _clean_check(cmd, target)
+
+
+def has_powershell():
+    if platform.system() != 'Windows':
+        return False
+    cmd = ['powershell', '-Command', 'echo test']
+    with open(os.path.devnull, 'wb') as devnull:
+        try:
+            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+        except Exception:
+            return False
+    return True
+
+
+download_file_powershell.viable = has_powershell
+
+
+def download_file_curl(url, target):
+    cmd = ['curl', url, '--silent', '--output', target]
+    _clean_check(cmd, target)
+
+
+def has_curl():
+    cmd = ['curl', '--version']
+    with open(os.path.devnull, 'wb') as devnull:
+        try:
+            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+        except Exception:
+            return False
+    return True
+
+
+download_file_curl.viable = has_curl
+
+
+def download_file_wget(url, target):
+    cmd = ['wget', url, '--quiet', '--output-document', target]
+    _clean_check(cmd, target)
+
+
+def has_wget():
+    cmd = ['wget', '--version']
+    with open(os.path.devnull, 'wb') as devnull:
+        try:
+            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+        except Exception:
+            return False
+    return True
+
+
+download_file_wget.viable = has_wget
+
+
+def download_file_insecure(url, target):
+    """
+    Use Python to download the file, even though it cannot authenticate the
+    connection.
+    """
+    src = urlopen(url)
+    try:
+        # Read all the data in one block.
+        data = src.read()
+    finally:
+        src.close()
+
+    # Write all the data in one block to avoid creating a partial file.
+    with open(target, "wb") as dst:
+        dst.write(data)
+
+
+download_file_insecure.viable = lambda: True
+
+
+def get_best_downloader():
+    downloaders = (
+        download_file_powershell,
+        download_file_curl,
+        download_file_wget,
+        download_file_insecure,
+    )
+    viable_downloaders = (dl for dl in downloaders if dl.viable())
+    return next(viable_downloaders, None)
+
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+                        to_dir=os.curdir, delay=15,
+                        downloader_factory=get_best_downloader):
+    """
+    Download setuptools from a specified location and return its filename
+
+    `version` should be a valid setuptools version number that is available
+    as an sdist for download under the `download_base` URL (which should end
+    with a '/'). `to_dir` is the directory where the egg will be downloaded.
+    `delay` is the number of seconds to pause before an actual download
+    attempt.
+
+    ``downloader_factory`` should be a function taking no arguments and
+    returning a function for downloading a URL to a target.
+    """
+    # making sure we use the absolute path
+    to_dir = os.path.abspath(to_dir)
+    zip_name = "setuptools-%s.zip" % version
+    url = download_base + zip_name
+    saveto = os.path.join(to_dir, zip_name)
+    if not os.path.exists(saveto):  # Avoid repeated downloads
+        log.warn("Downloading %s", url)
+        downloader = downloader_factory()
+        downloader(url, saveto)
+    return os.path.realpath(saveto)
+
+
+def _build_install_args(options):
+    """
+    Build the arguments to 'python setup.py install' on the setuptools package
+    """
+    return ['--user'] if options.user_install else []
+
+
+def _parse_args():
+    """
+    Parse the command line for options
+    """
+    parser = optparse.OptionParser()
+    parser.add_option(
+        '--user', dest='user_install', action='store_true', default=False,
+        help='install in user site package (requires Python 2.6 or later)')
+    parser.add_option(
+        '--download-base', dest='download_base', metavar="URL",
+        default=DEFAULT_URL,
+        help='alternative URL from where to download the setuptools package')
+    parser.add_option(
+        '--insecure', dest='downloader_factory', action='store_const',
+        const=lambda: download_file_insecure, default=get_best_downloader,
+        help='Use internal, non-validating downloader'
+    )
+    parser.add_option(
+        '--version', help="Specify which version to download",
+        default=DEFAULT_VERSION,
+    )
+    options, args = parser.parse_args()
+    # positional arguments are ignored
+    return options
+
+
+def main():
+    """Install or upgrade setuptools and EasyInstall"""
+    options = _parse_args()
+    archive = download_setuptools(
+        version=options.version,
+        download_base=options.download_base,
+        downloader_factory=options.downloader_factory,
+    )
+    return _install(archive, _build_install_args(options))
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/install.sh b/install.sh
new file mode 100755
index 0000000..75813c5
--- /dev/null
+++ b/install.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/install.sh
+
+# fit for arm64
+DOCKER_ARCH="$(uname -m)"
+
+UBUNTU_PORTS_URL="http://ports.ubuntu.com/ubuntu-ports/"
+UBUNTU_ARCHIVE_URL="http://archive.ubuntu.com/ubuntu/"
+
+source /etc/os-release
+source_file=/etc/apt/sources.list
+NSB_DIR="/opt/nsb_bin"
+
+if [[ "${DOCKER_ARCH}" == "aarch64" ]]; then
+    sed -i -e 's/^deb \([^/[]\)/deb [arch=arm64] \1/g' "${source_file}"
+    DOCKER_ARCH="arm64"
+    DOCKER_REPO="${UBUNTU_PORTS_URL}"
+    EXTRA_ARCH="amd64"
+    EXTRA_REPO="${UBUNTU_ARCHIVE_URL}"
+    dpkg --add-architecture amd64
+else
+    sed -i -e 's/^deb \([^/[]\)/deb [arch=amd64] \1/g' "${source_file}"
+    DOCKER_ARCH="amd64"
+    DOCKER_REPO="${UBUNTU_ARCHIVE_URL}"
+    EXTRA_ARCH="arm64"
+    EXTRA_REPO="${UBUNTU_PORTS_URL}"
+    dpkg --add-architecture arm64
+fi
+
+sed -i -e 's/^deb-src /# deb-src /g' "${source_file}"
+
+VERSION_CODENAME=${VERSION_CODENAME:-trusty}
+
+echo "APT::Default-Release \""${VERSION_CODENAME}"\";" > /etc/apt/apt.conf.d/default-distro
+
+sub_source_file=/etc/apt/sources.list.d/vnftest.list
+touch "${sub_source_file}"
+
+# first add xenial repo needed for installing qemu-user-static/xenial in the container
+# then add complementary architecture repositories in case the cloud image is of different arch
+if [[ "${VERSION_CODENAME}" != "xenial" ]]; then
+    REPO_UPDATE="deb [arch="${DOCKER_ARCH}"] "${DOCKER_REPO}" xenial-updates universe"
+fi
+
+echo -e ""${REPO_UPDATE}"
+deb [arch="${EXTRA_ARCH}"] "${EXTRA_REPO}" "${VERSION_CODENAME}" main universe multiverse restricted
+deb [arch="${EXTRA_ARCH}"] "${EXTRA_REPO}" "${VERSION_CODENAME}"-updates main universe multiverse restricted
+deb [arch="${EXTRA_ARCH}"] "${EXTRA_REPO}" "${VERSION_CODENAME}"-security main universe multiverse restricted
+deb [arch="${EXTRA_ARCH}"] "${EXTRA_REPO}" "${VERSION_CODENAME}"-proposed main universe multiverse restricted" > "${sub_source_file}"
+
+echo "vm.mmap_min_addr = 0" > /etc/sysctl.d/mmap_min_addr.conf
+
+# install tools
+apt-get update && apt-get install -y \
+    qemu-user-static/xenial \
+    bonnie++ \
+    wget \
+    expect \
+    curl \
+    git \
+    sshpass \
+    qemu-utils \
+    kpartx \
+    libffi-dev \
+    libssl-dev \
+    libzmq-dev \
+    python \
+    python-dev \
+    libxml2-dev \
+    libxslt1-dev \
+    nginx \
+    uwsgi \
+    uwsgi-plugin-python \
+    supervisor \
+    python-pip \
+    vim \
+    libxft-dev \
+    libxss-dev \
+    sudo \
+    iputils-ping
+
+if [[ "${DOCKER_ARCH}" != "aarch64" ]]; then
+    apt-get install -y libc6:arm64
+fi
+
+apt-get -y autoremove && apt-get clean
+
+git config --global http.sslVerify false
+
+
+# install vnftest + dependencies
+easy_install -U pip
+pip install -r requirements.txt
+pip install -e .
+
+/bin/bash "${PWD}/docker/uwsgi.sh"
+/bin/bash "${PWD}/docker/nginx.sh"
+cd "${PWD}/gui" && /bin/bash gui.sh
+mkdir -p /etc/nginx/vnftest
+mv dist /etc/nginx/vnftest/gui
+
+mkdir -p ${NSB_DIR}
+
+wget -P ${NSB_DIR}/ http://artifacts.onap.org/vnftest/third-party/trex_client.tar.gz
+tar xvf ${NSB_DIR}/trex_client.tar.gz -C ${NSB_DIR}
+rm -f ${NSB_DIR}/trex_client.tar.gz
+
+service nginx restart
+uwsgi -i /etc/vnftest/vnftest.ini
diff --git a/pre_config/env_config.sh b/pre_config/env_config.sh
new file mode 100644
index 0000000..0e6967f
--- /dev/null
+++ b/pre_config/env_config.sh
@@ -0,0 +1,16 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+export HOME=/home/devel/dev/onap/vnfsdk
+export CONF_FILE=/home/devel/dev/onap/vnfsdk/vnftest/etc/vnftest/vnftest.yaml
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..3082d03
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,70 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/requirements.txt
+
+Babel==2.3.4            # BSD; OSI Approved  BSD License
+Jinja2==2.9.6           # BSD; OSI Approved  BSD License
+SQLAlchemy==1.1.12      # MIT License; OSI Approved  MIT License
+PTable==0.9.2           # BSD (3 clause); OSI Approved  BSD License
+backport-ipaddress==0.1; python_version <= "2.7"    # OSI Approved  Python Software Foundation License
+chainmap==1.0.2         # Python Software Foundation License; OSI Approved  Python Software Foundation License
+django==1.8.17          # BSD; OSI Approved  BSD License
+                        # NOTE(ralonsoh): django must be bumped to 1.11.8; consider the migration notes [1]
+                        # [1] https://docs.djangoproject.com/ja/1.11/ref/templates/upgrading/
+docker-py==1.10.6       # OSI Approved  Apache Software License
+extras==1.0.0           # OSI Approved  MIT License
+flasgger==0.5.13        # MIT
+flask-restful-swagger==0.19 # MIT
+flask-restful==0.3.5    # BSD; OSI Approved  BSD License
+flask==0.11.1           # BSD; OSI Approved  BSD License
+functools32==3.2.3.post2; python_version <= "2.7"    # PSF license
+futures==3.1.1;python_version=='2.7'    # BSD; OSI Approved  BSD License
+influxdb==4.1.1         # MIT License; OSI Approved  MIT License
+jinja2schema==0.1.4     # OSI Approved  BSD License
+keystoneauth1==3.1.0    # OSI Approved  Apache Software License
+mock==2.0.0             # OSI Approved  BSD License; `BSD License`_; http://github.com/testing-cabal/mock/blob/master/LICENSE.txt
+msgpack-python==0.4.8   # OSI Approved  Apache Software License
+netaddr==0.7.19         # BSD License; OSI Approved  BSD License; OSI Approved  MIT License
+os-client-config==1.28.0    # OSI Approved  Apache Software License
+osc-lib==1.7.0          # OSI Approved  Apache Software License
+oslo.config==4.11.1     # OSI Approved  Apache Software License
+oslo.i18n==3.17.0       # OSI Approved  Apache Software License
+oslo.serialization==2.20.1  # OSI Approved  Apache Software License
+oslo.utils==3.28.0      # OSI Approved  Apache Software License
+paramiko==2.2.1         # LGPL; OSI Approved  GNU Library or Lesser General Public License (LGPL)
+pbr==3.1.1              # OSI Approved  Apache Software License; Apache License, Version 2.0
+pika==0.10.0            # BSD; OSI Approved  BSD License
+positional==1.1.2       # OSI Approved  Apache Software License
+pycrypto==2.6.1         # Public Domain
+pyparsing==2.2.0        # MIT License; OSI Approved  MIT License
+pyroute2==0.4.21        # dual license GPLv2+ and Apache v2; OSI Approved  GNU General Public License v2 or later (GPLv2+); OSI Approved  Apache Software License
+pyrsistent==0.14.1      # LICENSE.mit; OSI Approved  MIT License
+python-cinderclient==3.1.0      # OSI Approved  Apache Software License
+python-glanceclient==2.8.0      # OSI Approved  Apache Software License
+python-heatclient==1.11.1       # OSI Approved  Apache Software License
+python-keystoneclient==3.13.0   # OSI Approved  Apache Software License
+python-neutronclient==6.5.0     # OSI Approved  Apache Software License
+python-novaclient==9.1.1        # OSI Approved  Apache Software License
+pyzmq==16.0.2           # LGPL+BSD; OSI Approved  GNU Library or Lesser General Public License (LGPL); OSI Approved  BSD License
+requests==2.18.2        # Apache 2.0; OSI Approved  Apache Software License
+requestsexceptions==1.3.0   # OSI Approved  Apache Software License
+scp==0.10.2             # LGPL
+shade==1.22.2           # OSI Approved  Apache Software License
+simplejson==3.13.2      # MIT License; OSI Approved  MIT License; OSI Approved  Academic Free License (AFL)
+six==1.10.0             # MIT; OSI Approved  MIT License
+stevedore==1.25.0       # OSI Approved  Apache Software License
+traceback2==1.4.0       # OSI Approved  Python Software Foundation License
+unicodecsv==0.14.1      # BSD License; OSI Approved  BSD License
+wrapt==1.10.10          # BSD
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..757faf5
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/setup.py
+from __future__ import absolute_import
+from setuptools import setup, find_packages
+
+
+setup(
+    name="vnftest",
+    version="0.1.dev0",
+    packages=find_packages(),
+    include_package_data=True,
+    package_data={
+        'vnftest': [
+            'onap/steps/onboard/*.yaml',
+            'onap/steps/create_service/*.yaml'
+        ],
+        'etc': [
+            'vnftest/*.yaml',
+            'vnftest/*.conf'
+        ],
+        'tests': [
+            'onap/*/*.yaml'
+        ]
+    },
+    url="https://www.onap.org",
+    entry_points={
+        'console_scripts': [
+            'vnftest=vnftest.main:main'
+        ],
+    },
+    scripts=[
+        'tools/vnftest-img-modify',
+        'tools/vnftest-img-lxd-modify',
+        'tools/vnftest-img-dpdk-modify'
+    ]
+)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..f6d3949
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/test-requirements.txt
+
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+coverage==4.4.2             # Apache 2.0; OSI Approved  Apache Software License; http://www.apache.org/licenses/LICENSE-2.0; http://www.apache.org/licenses/LICENSE-2.0
+fixtures==3.0.0             # OSI Approved  BSD License; OSI Approved  Apache Software License
+packaging==16.8.0           # BSD or Apache License, Version 2.0
+pyflakes==1.0.0             # MIT; OSI Approved  MIT License
+pylint==1.8.1               # GPLv2
+python-subunit==1.2.0       # OSI Approved  Apache Software License; OSI Approved  BSD License
+testrepository==0.0.20      # OSI Approved  BSD License; OSI Approved  Apache Software License
+testtools==2.3.0            # OSI Approved  MIT License
+unittest2==1.1.0            # OSI Approved  BSD License
+
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/onap/test_cases/onap_vnftest_tc001.yaml b/tests/onap/test_cases/onap_vnftest_tc001.yaml
new file mode 100644
index 0000000..c6cc411
--- /dev/null
+++ b/tests/onap/test_cases/onap_vnftest_tc001.yaml
@@ -0,0 +1,42 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+---
+schema: "vnftest:task:0.1"
+description: >
+    Vnftest TC001 config file;
+    Create VLM in SDC
+
+{% set csar_name = csar_name or none %}
+{% set csar_id = csar_id or none %}
+{% set csar_package_location = csar_package_location or none %}
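+# the vendor_name input below gets a random numeric suffix so each run creates a distinct vendor in SDC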
+steps:
+-
+  type: OnapApiCall
+  options:
+    file: "onboard/create_vlm.yaml"
+    input:
+    -
+      parameter_name: "vendor_name"
+      value: {{ ["test_vendor_", range(10000)|random ]|join }}
+    output:
+    -
+      parameter_name: "vendor_id"
+      path: "value"
+
+context:
+  type: CSAR
+  csar_name: {{csar_name}}
+  csar_id: {{csar_id}}
+  csar_package_location: {{csar_package_location}}
\ No newline at end of file
diff --git a/tests/onap/test_suites/onap_basic_lifecycle.yaml b/tests/onap/test_suites/onap_basic_lifecycle.yaml
new file mode 100644
index 0000000..6be8a1b
--- /dev/null
+++ b/tests/onap/test_suites/onap_basic_lifecycle.yaml
@@ -0,0 +1,25 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+---
+
+schema: "vnftest:suite:0.1"
+
+name: "onap-basic-lifecycle"
+test_cases_dir: "tests/onap/test_cases/"
+test_cases:
+-
+    file_name: onap_vnftest_tc001.yaml
+    task_args:
+        default: '{"csar_name": "dummy_package_name", "csar_id":"dummy123", "csar_package_location": "/etc/vnftest/dummy_package.csar"}'
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/unit/__init__.py
diff --git a/tests/unit/test_yaml_loader.py b/tests/unit/test_yaml_loader.py
new file mode 100644
index 0000000..361f1f7
--- /dev/null
+++ b/tests/unit/test_yaml_loader.py
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tests/unit/common/test_yaml_loader.py
+
+from __future__ import absolute_import
+import unittest
+
+from vnftest.common import yaml_loader
+
+
+class TemplateFormatTestCase(unittest.TestCase):
+
+    def test_parse_to_value_exception(self):
+
+        self.assertEqual(yaml_loader.yaml_load("string"), u"string")
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/README b/tools/README
new file mode 100644
index 0000000..0edcf2d
--- /dev/null
+++ b/tools/README
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/README
+
+This directory contains various utilities needed in the vnftest environment.
+
+vnftest-img-modify is a generic script (but Ubuntu cloud image specific) that
+takes another script as an argument. This second script does the actual
+modifications to the image. sudo is required since the base image is mounted
+using qemu's network block device support.
+
+Usage example:
+
+$ sudo vnftest-img-modify $HOME/vnftest/tools/ubuntu-server-cloudimg-modify.sh
+
diff --git a/tools/cover.awk b/tools/cover.awk
new file mode 100644
index 0000000..e4bb816
--- /dev/null
+++ b/tools/cover.awk
@@ -0,0 +1,25 @@
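+# Parses `diff -U 0 <baseline_report> <current_report>` output (see show_diff in tools/cover.sh)
+# and prints, per module, the change in the missed-lines column between the two coverage reports.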
+BEGIN{
+    template = "%6s   %-75s\n"
+    printf template, "Delta", "Module Path"
+}
+
+/^-/{
+    s = substr($1, 2)
+    x[s] = $3;
+};
+
+/^+/{
+    s = substr($1, 2)
+    d = $3
+    if (s in x)
+       d = d - x[s]
+    y[s" "d] = d
+}
+
+END{
+    asorti(y, z1, "@val_num_asc")
+    for (i=1; i <= length(z1); i++){
+        split(z1[i], z2, " ")
+        printf template, z2[2], z2[1]
+    }
+}
diff --git a/tools/cover.sh b/tools/cover.sh
new file mode 100644
index 0000000..8acfb56
--- /dev/null
+++ b/tools/cover.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/cover.sh
+
+if [[ -n $COVER_DIR_NAME ]]; then
+    :
+elif [[ -n $_ ]]; then
+    COVER_DIR_NAME=$( dirname $_ )
+else
+    COVER_DIR_NAME=$( dirname $0 )
+fi
+
+show_diff () {
+    diff -U 0 $1 $2 | awk -f $COVER_DIR_NAME/cover.awk
+}
+
+run_coverage_test() {
+
+    ALLOWED_EXTRA_MISSING=10
+    # enable debugging
+    set -x
+
+    # Stash uncommitted changes, check out the previous commit and save the baseline coverage report
+    uncommited=$(git status --porcelain | grep -v "^??")
+    [[ -n ${uncommited} ]] && git stash > /dev/null
+    git checkout HEAD^
+
+    baseline_report=$(mktemp -t vnftest_coverageXXXXXXX)
+
+    find . -type f -name "*.pyc" -delete
+
+    # Temporarily run tests from two directories, until all tests have moved
+    coverage run -p -m unittest discover ./tests/unit
+    coverage run -p -m unittest discover ./vnftest/tests/unit
+    coverage combine
+
+    # Temporarily omit vnftest/tests from the report
+    coverage report --omit=vnftest/tests/*/* > ${baseline_report}
+    coverage erase
+
+    # debug awk
+    tail -1 ${baseline_report}
+    baseline_missing=$(awk 'END { if (int($3) > 0) print $3 }' ${baseline_report})
+
+    if [[ -z $baseline_missing ]]; then
+        echo "Failed to determine baseline missing"
+        exit 1
+    fi
+
+    # Checkout back and unstash uncommitted changes (if any)
+    git checkout -
+    [[ -n ${uncommited} ]] && git stash pop > /dev/null
+
+    # Generate and save coverage report
+    current_report=$(mktemp -t vnftest_coverageXXXXXXX)
+
+    find . -type f -name "*.pyc" -delete
+
+    # Temporarily run tests from two directories, until all tests have moved
+    coverage run -p -m unittest discover ./tests/unit
+    coverage run -p -m unittest discover ./vnftest/tests/unit
+    coverage combine
+
+    # Temporarily omit vnftest/tests from the report
+    coverage report --omit=vnftest/tests/*/* > ${current_report}
+    coverage erase
+
+    rm -rf cover-$PY_VER
+    coverage html -d cover-$PY_VER
+
+    # debug awk
+    tail -1 ${current_report}
+    current_missing=$(awk 'END { if (int($3) > 0) print $3 }' ${current_report})
+
+    if [[ -z $current_missing ]]; then
+        echo "Failed to determine current missing"
+        exit 1
+    fi
+
+    # Show coverage details
+    new_missing=$((current_missing - baseline_missing))
+
+    echo "Missing lines allowed to introduce : ${ALLOWED_EXTRA_MISSING}"
+    echo "Missing lines introduced           : ${new_missing}"
+    echo "Missing lines in master            : ${baseline_missing}"
+    echo "Missing lines in proposed change   : ${current_missing}"
+
+    if [[ ${new_missing} -gt ${ALLOWED_EXTRA_MISSING} ]];
+    then
+        show_diff ${baseline_report} ${current_report}
+        echo "Please write more unit tests, we should keep our test coverage :( "
+        rm ${baseline_report} ${current_report}
+        exit 1
+
+    elif [[ ${new_missing} -gt 0 ]];
+    then
+        show_diff ${baseline_report} ${current_report}
+        echo "I believe you can cover all your code with 100% coverage!"
+
+    else
+        echo "Thank you! You are awesome! Keep writing unit tests! :)"
+    fi
+
+    rm ${baseline_report} ${current_report}
+}
diff --git a/tools/dpdk_install.yml b/tools/dpdk_install.yml
new file mode 100644
index 0000000..09898fd
--- /dev/null
+++ b/tools/dpdk_install.yml
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/dpdk_install.yml
+
+heat_template_version: 2015-04-30
+
+description: >
+  Used to run VMs with DPDK pktgen
+
+parameters:
+  image:
+    type: string
+    description: Name of the image
+    default: vnftest-wily-server
+
+  timeout:
+    type: number
+    description: Timeout in seconds for WaitCondition, depends on your image and environment
+    default: 900
+
+  external_net_name:
+    type: string
+    description: Name of the external network that the management network will connect to
+    default: admin_floating_net
+
+resources:
+  flavor:
+    type: OS::Nova::Flavor
+    properties:
+      ram: 4096
+      vcpus: 4
+      disk: 4
+
+  network:
+    type: OS::Neutron::Net
+    properties:
+      name: dpdk_net
+
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name: dpdk_subnet
+      ip_version: 4
+      cidr: 192.168.0.0/24
+      network: { get_resource: network }
+
+  management_router:
+    type: OS::Neutron::Router
+    properties:
+      name: management_router
+      external_gateway_info:
+        network: { get_param: external_net_name }
+
+  management_router_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router: { get_resource: management_router }
+      subnet: { get_resource: subnet }
+
+  floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: external_net_name }
+
+  floating_ip_association:
+    type: OS::Nova::FloatingIPAssociation
+    properties:
+      floating_ip: { get_resource: floating_ip }
+      server_id: {get_resource: dpdk_vm}
+
+  keypair:
+    type: OS::Nova::KeyPair
+    properties:
+      name: vnftest-key
+      public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD0RkXfW6pksd1cZmXuvXZF/Mlqqq3ahIGcGoULOC97XMpu0vdxMpcUwdjwGqMwEXTVyfHidu0l99bLqOCpSUKCmbWx3ONJ+1kqFx4HwsKEWLiyDYqsuMrDeZT1eFjC5avCoTcrIw2wq5NaBb00lDGagNZOeopaL5YIa4+PizEY23+cir24D67NU21Fg3JE92AIeGlNa4j66L3a+lL0hZq74Dilmp42wm4GsbplRO6KJfyaraowHb1X+TmhCjBgHk6M/OJ9yPAroZyJNcwjMAuuxhAYWRuT3SdbnoUR0RG2VhfDh0qNid7vOqLbhKPeaLLFmzkN+9w3WdCp6LbSYt87 vnftest@vnftest.onap.org
+
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: { get_resource: wait_handle }
+      count: 1
+      timeout: { get_param: timeout }
+
+  dpdk_vm:
+    type: OS::Nova::Server
+    depends_on: [subnet, keypair, flavor]
+    properties:
+      name: { get_param: "OS::stack_name" }
+      image: { get_param: image }
+      flavor: { get_resource: flavor }
+      key_name: {get_resource: keypair}
+      networks:
+        - network: { get_resource: network }
+      config_drive: True
+      user_data_format : RAW
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/sh
+            cat <<'CEOF' > /tmp/dpdk_post_build.sh
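+            # build DPDK and pktgen-dpdk, load the igb_uio/rte_kni modules, then signal the Heat wait condition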
+            export RTE_SDK=/dpdk
+            export RTE_TARGET=x86_64-native-linuxapp-gcc
+            cd /dpdk
+            make install T=x86_64-native-linuxapp-gcc DESTDIR=destdir
+            modprobe uio
+            insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+            insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/rte_kni.ko
+            cd /pktgen-dpdk
+            make RTE_SDK=/dpdk
+            echo "PCKTGEN BUILT"
+            rm -rf /var/lib/cloud/instances
+            echo "rm succesfull"
+            ls /dpdk/x86_64-native-linuxapp-gcc/kmod/
+            $NOTIFY --data-binary '{"status": "SUCCESS"}'
+            CEOF
+            chmod +x /tmp/dpdk_post_build.sh
+            echo "chmod"
+            nohup /tmp/dpdk_post_build.sh &
+          params:
+            $NOTIFY: { get_attr: ['wait_handle', 'curl_cli'] }
+
+outputs:
+  vm_uuid:
+    description: uuid of the VM
+    value: { get_attr: [ dpdk_vm, show, id ] }
diff --git a/tools/os-requirements-check.py b/tools/os-requirements-check.py
new file mode 100644
index 0000000..a9a28d7
--- /dev/null
+++ b/tools/os-requirements-check.py
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/os-requirements-check.py
+import argparse
+import collections
+import os
+from packaging import version as pkg_version
+import sys
+
+from openstack_requirements import requirement
+
+
+PROJECT_REQUIREMENTS_FILES = ['requirements.txt']
+QUALIFIER_CHARS = ['<', '>', '!', '=']
+
+
+def _grab_args():
+    """Grab and return arguments"""
+    parser = argparse.ArgumentParser(
+        description='Check if project requirements have changed')
+
+    parser.add_argument('env_dir', help='tox environment directory')
+    return parser.parse_args()
+
+
+def _extract_reqs(file_name, blacklist=None):
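+    # returns a dict mapping package name -> list of parsed Requirement entries,
+    # skipping any names present in the blacklist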
+    blacklist = blacklist or {}
+    content = open(file_name, 'rt').read()
+    reqs = collections.defaultdict(tuple)
+    parsed = requirement.parse(content)
+    for name, entries in ((name, entries) for (name, entries) in parsed.items()
+                          if (name and name not in blacklist)):
+        list_reqs = [r for (r, line) in entries]
+        # Strip the comments out before checking if there are duplicates
+        list_reqs_stripped = [r._replace(comment='') for r in list_reqs]
+        if len(list_reqs_stripped) != len(set(list_reqs_stripped)):
+            print('Requirements file %s has duplicate entries for package '
+                  '"%s: %r' % (file_name, name, list_reqs))
+        reqs[name] = list_reqs
+    return reqs
+
+
+def _extract_qualifier_version(specifier):
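+    # e.g. '>=2.0' -> ('>=', Version('2.0')); '!=1.4.0' -> ('!=', Version('1.4.0'))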
+    index = 1
+    # Find qualifier (one or two chars).
+    if specifier[0] in QUALIFIER_CHARS and specifier[1] in QUALIFIER_CHARS:
+        index = 2
+    qualifier = specifier[:index]
+    version = pkg_version.Version(specifier[index:])
+    return qualifier, version
+
+
+def main():
+    args = _grab_args()
+
+    # Build a list of requirements from the global list in the
+    # openstack/requirements project so we can match them to the changes
+    env_dir = args.env_dir
+    req_dir = env_dir + '/src/os-requirements/'
+    global_reqs = _extract_reqs(req_dir + '/global-requirements.txt')
+    blacklist = _extract_reqs(req_dir + '/blacklist.txt')
+
+    # Build a list of project requirements.
+    failed = False
+    local_dir = os.getcwd()
+    for file_name in PROJECT_REQUIREMENTS_FILES:
+        print('Validating requirements file "%s"' % file_name)
+        proj_reqs = _extract_reqs(local_dir + '/' + file_name,
+                                  blacklist=blacklist)
+
+        for name, req in proj_reqs.items():
+            global_req = global_reqs.get(name)
+            if not global_req:
+                continue
+            global_req = global_req[0]
+            req = req[0]
+            if not global_req.specifiers:
+                continue
+
+            specifiers = global_req.specifiers.split(',')
+            for spec in specifiers:
+                _, req_version = _extract_qualifier_version(req.specifiers)
+                g_qualifier, g_version = _extract_qualifier_version(spec)
+                if g_qualifier == '!=' and g_version == req_version:
+                    print('Package "%s" version %s is not compatible' %
+                          (name, req_version))
+                    failed = True
+                if g_qualifier == '>=' and g_version > req_version:
+                    print('Package "%s" version %s outdated, minimum version '
+                          '%s' % (name, req_version, g_version))
+                    failed = True
+
+    if failed:
+        print('Incompatible requirement found!')
+        sys.exit(1)
+    print('Updated requirements match openstack/requirements')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/run_tests.sh b/tools/run_tests.sh
new file mode 100755
index 0000000..4f7aea3
--- /dev/null
+++ b/tools/run_tests.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/run_tests.sh
+
+# Run vnftest's unit, coverage, and functional tests
+
+getopts ":f" FILE_OPTION
+opts=$@ # get other args
+
+# don't write .pyc files; this can cause odd unittest results
+export PYTHONDONTWRITEBYTECODE=1
+
+PY_VER="py$( python --version | sed 's/[^[:digit:]]//g' | cut -c-2 )"
+export PY_VER
+
+COVER_DIR_NAME="./tools/"
+export COVER_DIR_NAME
+
+run_tests() {
+    echo "Get external libs needed for unit test"
+
+    echo "Running unittest ... "
+    if [ "${FILE_OPTION}" == "f" ]; then
+        # default log path if the caller did not set one (assumed location)
+        logfile="${logfile:-/tmp/vnftest_unit_tests.log}"
+        python -m unittest discover -v -s tests/unit > $logfile 2>&1
+    else
+        python -m unittest discover -v -s tests/unit
+    fi
+
+    if [ $? -ne 0 ]; then
+        if [ "${FILE_OPTION}" == "f" ]; then
+            echo "FAILED, results in $logfile"
+        fi
+        exit 1
+    else
+        if [ "${FILE_OPTION}" == "f" ]; then
+            echo "OK, results in $logfile"
+        fi
+    fi
+}
+
+run_coverage() {
+    source $COVER_DIR_NAME/cover.sh
+    run_coverage_test
+}
+
+run_functional_test() {
+
+    mkdir -p .testrepository
+    python -m subunit.run discover vnftest/tests/functional > .testrepository/subunit.log
+
+    subunit2pyunit < .testrepository/subunit.log
+    EXIT_CODE=$?
+    subunit-stats < .testrepository/subunit.log
+
+    if [ $EXIT_CODE -ne 0 ]; then
+        exit 1
+    else
+        echo "OK"
+    fi
+}
+
+if [[ $opts =~ "--unit" ]]; then
+    run_tests
+fi
+
+if [[ $opts =~ "--coverage" ]]; then
+    run_coverage
+fi
+
+if [[ $opts =~ "--functional" ]]; then
+    run_functional_test
+fi
+
+if [[ -z $opts ]]; then
+    echo "No tests to run!!"
+    echo "Usage: run_tests.sh [--unit] [--coverage] [--functional]"
+    exit 1
+fi
diff --git a/tools/ubuntu-server-cloudimg-dpdk-modify.sh b/tools/ubuntu-server-cloudimg-dpdk-modify.sh
new file mode 100755
index 0000000..2a44eb8
--- /dev/null
+++ b/tools/ubuntu-server-cloudimg-dpdk-modify.sh
@@ -0,0 +1,143 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/ubuntu-server-cloudimg-dpdk-modify.sh
+
+# installs required packages
+# must be run from inside the image (either chrooted or running)
+
+set -ex
+
+if [ $# -eq 1 ]; then
+    nameserver_ip=$1
+
+    # /etc/resolv.conf is a symbolic link to /run, restore at end
+    rm /etc/resolv.conf
+    echo "nameserver $nameserver_ip" > /etc/resolv.conf
+    echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+    echo "nameserver 8.8.4.4" >> /etc/resolv.conf
+fi
+
+# iperf3 only available for wily in backports
+grep wily /etc/apt/sources.list && \
+    echo "deb http://archive.ubuntu.com/ubuntu/ wily-backports main restricted universe multiverse" >> /etc/apt/sources.list
+
+# Workaround for building on CentOS (apt-get is not working with http sources)
+# sed -i 's/http/ftp/' /etc/apt/sources.list
+
+# Force apt to use ipv4 due to build problems on LF POD.
+echo 'Acquire::ForceIPv4 "true";' > /etc/apt/apt.conf.d/99force-ipv4
+
+echo 'GRUB_CMDLINE_LINUX="resume=/dev/sda1 default_hugepagesz=1G hugepagesz=1G hugepages=2 iommu=on iommu=pt intel_iommu=on"' >> /etc/default/grub
+echo 'vm.nr_hugepages=1024' >> /etc/sysctl.conf
+echo 'huge /mnt/huge hugetlbfs defaults 0 0' >> /etc/fstab
+
+mkdir /mnt/huge
+chmod 777 /mnt/huge
+
+for i in {1..2}
+do
+    touch /etc/network/interfaces.d/eth$i.cfg
+    chmod 777 /etc/network/interfaces.d/eth$i.cfg
+    echo "auto eth$i" >> /etc/network/interfaces.d/eth$i.cfg
+    echo "iface eth$i inet dhcp" >> /etc/network/interfaces.d/eth$i.cfg
+done
+
+# this is needed for checking dpdk status, adding interfaces to dpdk, binding, unbinding, etc.
+
+# Add hostname to /etc/hosts.
+# Allow console access via pwd
+cat <<EOF >/etc/cloud/cloud.cfg.d/10_etc_hosts.cfg
+manage_etc_hosts: True
+password: RANDOM
+chpasswd: { expire: False }
+ssh_pwauth: True
+EOF
+
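+# derive the kernel version suffix from the installed vmlinuz image name,
+# e.g. /boot/vmlinuz-4.4.0-116-generic -> 4.4.0-116-generic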
+linuxheadersversion=$(echo ls /boot/vmlinuz* | cut -d- -f2-)
+
+apt-get update
+apt-get install -y \
+    bc \
+    fio \
+    gcc \
+    git \
+    iperf3 \
+    iproute2 \
+    ethtool \
+    linux-tools-common \
+    linux-tools-generic \
+    lmbench \
+    make \
+    unzip \
+    netperf \
+    patch \
+    perl \
+    rt-tests \
+    stress \
+    sysstat \
+    linux-headers-"${linuxheadersversion}" \
+    libpcap-dev \
+    lua5.2 \
+    net-tools \
+    wget \
+    ncurses-dev \
+    libedit-dev \
+    pciutils \
+    pkg-config \
+    liblua5.2-dev \
+    libncursesw5-dev
+
+dpkg -L liblua5.2-dev
+cp /usr/include/lua5.2/lua.h /usr/include/
+cp /usr/include/lua5.2/lua.h /usr/include/x86_64-linux-gnu/
+
+git clone http://dpdk.org/git/dpdk
+git clone http://dpdk.org/git/apps/pktgen-dpdk
+
+CLONE_DEST=/opt/tempT
+# remove before cloning
+rm -rf -- "${CLONE_DEST}"
+git clone https://github.com/kdlucas/byte-unixbench.git "${CLONE_DEST}"
+make --directory "${CLONE_DEST}/UnixBench/"
+
+git clone https://github.com/beefyamoeba5/ramspeed.git "${CLONE_DEST}/RAMspeed"
+cd "${CLONE_DEST}/RAMspeed/ramspeed-2.6.0"
+mkdir temp
+bash build.sh
+
+git clone https://github.com/beefyamoeba5/cachestat.git "${CLONE_DEST}"/Cachestat
+
+cd /root
+wget http://dpdk.org/browse/dpdk/snapshot/dpdk-17.02.zip
+unzip dpdk-17.02.zip
+cd dpdk-17.02
+make install T=x86_64-native-linuxapp-gcc
+
+cd /root
+wget https://01.org/sites/default/files/downloads/intelr-data-plane-performance-demonstrators/dppd-prox-v035.zip
+unzip dppd-prox-v035.zip
+cd dppd-PROX-v035
+chmod +x helper-scripts/trailing.sh
+export RTE_SDK=/root/dpdk-17.02
+export RTE_TARGET=x86_64-native-linuxapp-gcc
+make
+
+# restore symlink
+ln -sfrT /run/resolvconf/resolv.conf /etc/resolv.conf
diff --git a/tools/ubuntu-server-cloudimg-modify.sh b/tools/ubuntu-server-cloudimg-modify.sh
new file mode 100755
index 0000000..50a9a4c
--- /dev/null
+++ b/tools/ubuntu-server-cloudimg-modify.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/ubuntu-server-cloudimg-modify.sh
+
+# installs required packages
+# must be run from inside the image (either chrooted or running)
+
+set -ex
+
+if [ $# -eq 1 ]; then
+    nameserver_ip=$1
+
+    # /etc/resolv.conf is a symbolic link to /run, restore at end
+    rm /etc/resolv.conf
+    echo "nameserver $nameserver_ip" > /etc/resolv.conf
+    echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+    echo "nameserver 8.8.4.4" >> /etc/resolv.conf
+fi
+
+# iperf3 only available for trusty in backports
+if grep -q trusty /etc/apt/sources.list ; then
+    if [ "${YARD_IMG_ARCH}" = "arm64" ]; then
+        echo "deb [arch=${YARD_IMG_ARCH}] http://ports.ubuntu.com/ trusty-backports main restricted universe multiverse" >> /etc/apt/sources.list
+    else
+        echo "deb http://archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe multiverse" >> /etc/apt/sources.list
+    fi
+fi
+# Workaround for building on CentOS (apt-get is not working with http sources)
+# sed -i 's/http/ftp/' /etc/apt/sources.list
+
+# Force apt to use ipv4 due to build problems on LF POD.
+echo 'Acquire::ForceIPv4 "true";' > /etc/apt/apt.conf.d/99force-ipv4
+
+# Add hostname to /etc/hosts.
+# Allow console access via pwd
+cat <<EOF >/etc/cloud/cloud.cfg.d/10_etc_hosts.cfg
+manage_etc_hosts: True
+password: RANDOM
+chpasswd: { expire: False }
+ssh_pwauth: True
+EOF
+apt-get update
+apt-get install -y \
+    bc \
+    bonnie++ \
+    fio \
+    git \
+    gcc \
+    iperf3 \
+    ethtool \
+    iproute2 \
+    linux-tools-common \
+    linux-tools-generic \
+    lmbench \
+    make \
+    netperf \
+    patch \
+    perl \
+    rt-tests \
+    stress \
+    sysstat
+
+CLONE_DEST=/opt/tempT
+
+# remove before cloning
+rm -rf -- "${CLONE_DEST}"
+
+git clone https://github.com/kdlucas/byte-unixbench.git "${CLONE_DEST}"
+
+make --directory "${CLONE_DEST}/UnixBench/"
+
+git clone https://github.com/beefyamoeba5/ramspeed.git "${CLONE_DEST}/RAMspeed"
+
+cd "${CLONE_DEST}/RAMspeed/ramspeed-2.6.0"
+mkdir temp
+bash build.sh
+
+git clone https://github.com/beefyamoeba5/cachestat.git "${CLONE_DEST}/Cachestat"
+
+# restore symlink
+ln -sf /run/resolvconf/resolv.conf /etc/resolv.conf
diff --git a/tools/vnftest-img-dpdk-finalize.sh b/tools/vnftest-img-dpdk-finalize.sh
new file mode 100644
index 0000000..f94de26
--- /dev/null
+++ b/tools/vnftest-img-dpdk-finalize.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/yardstick-img-dpdk-finalize.sh
+
+# installs dpdk and pktgen packages on modified image
+
+# PREREQUISITES
+# modified image (vnftest-wily-server) must be uploaded to OpenStack
+# heat must be installed: apt-get install python-heatclient python-glanceclient python-novaclient
+# must have a public vnftest-key uploaded in openstack
+# must have a proper flavor for the image (e.g. m1.small)
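+# example upload of the modified image (name and path are illustrative):
+#   openstack image create --disk-format qcow2 --container-format bare \
+#       --file /tmp/workspace/vnftest/vnftest-wily-server vnftest-wily-server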
+
+
+stackname="vnftest-modify-stack"
+template=dpdk_install.yml
+new_image_name="vnftest-image-pktgen-ready"
+
+openstack stack create $stackname -f yaml -t $template
+progress="WARMING_UP"
+
+while [ "$progress" != "CREATE_COMPLETE" ]
+do
+  sleep 10
+  echo "check stack status......."
+  show_output=$(openstack stack show $stackname)
+  progress=$(echo $show_output | sed 's/^.*stack_status . \([^ ]*\).*$/\1/')
+  echo "$progress"
+  if [ "$progress" == "CREATE_FAILED" ];then
+    echo "create $stackname failed"
+    exit 1
+  fi
+done
+
+# workaround: without a wait, the pktgen file size is zero in the snapshot.
+sleep 60
+
+status=$(nova image-create --poll $stackname $new_image_name)
+if [[ "$status" =~ "Finished" ]];then
+  echo "$new_image_name finished"
+fi
+
+nova delete $stackname
+sleep 10
+openstack stack delete --yes $stackname
diff --git a/tools/vnftest-img-dpdk-modify b/tools/vnftest-img-dpdk-modify
new file mode 100644
index 0000000..2f718bd
--- /dev/null
+++ b/tools/vnftest-img-dpdk-modify
@@ -0,0 +1,169 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/yardstick-img-dpdk-modify
+
+# vnftest-img-dpdk-modify - download and modify an Ubuntu cloud image
+#
+# The actual customization is done by a script passed with an absolute path as
+# the single argument. The command needs to be invoked with sudo.
+#
+# Example invocation:
+# vnftest-img-dpdk-modify /home/vnftest/tools/ubuntu-server-cloudimg-dpdk-modify.sh
+#
+# Warning: the script will create files by default in:
+#   /tmp/workspace/vnftest
+# the files will be owned by root!
+#
+# TODO: image resize is needed if the base image is too small
+#
+
+set -e
+set -x
+
+die() {
+    echo "error: $1" >&2
+    exit 1
+}
+
+test $# -eq 1 || die "no image specific script as argument"
+test $(id -u) -eq 0 || die "should invoke using sudo"
+
+cmd=$1
+test -x $cmd
+mountdir="/mnt/vnftest"
+
+workspace=${WORKSPACE:-"/tmp/workspace/vnftest"}
+host=${HOST:-"cloud-images.ubuntu.com"}
+release=${RELEASE:-"wily"}
+image_path="${release}/current/${release}-server-cloudimg-amd64-disk1.img"
+image_url=${IMAGE_URL:-"https://${host}/${image_path}"}
+sha256sums_path="${release}/current/SHA256SUMS"
+sha256sums_url=${SHA256SUMS_URL:-"https://${host}/${sha256sums_path}"}
+
+imgfile="${workspace}/vnftest-${release}-server"
+raw_imgfile="${workspace}/vnftest-${release}-server.raw"
+filename=$(basename $image_url)
+
+# download and checksum base image, conditionally if local copy is outdated
+download() {
+    test -d $workspace || mkdir -p $workspace
+    cd $workspace
+    rm -f SHA256SUMS # always download the checksum file to detect a stale image
+    wget $sha256sums_url
+    test -e $filename || wget -nc $image_url
+    grep $filename SHA256SUMS | sha256sum -c ||
+    if [ $? -ne 0 ]; then
+        rm $filename
+        wget -nc $image_url
+        grep $filename SHA256SUMS | sha256sum -c
+    fi
+    qemu-img convert $filename $raw_imgfile
+    cd -
+}
+
+# mount image
+setup() {
+    mkdir -p $mountdir
+
+    for i in $(seq 0 9); do
+        [ -a /dev/loop$i ] || mknod -m 660 /dev/loop$i b 7 $i
+    done
+
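+    # kpartx -l lists the image's partition mappings (e.g. "loop0p1 : ...");
+    # take the first mapping name, i.e. the image's first partition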
+    loopdevice=$(kpartx -l $raw_imgfile | head -1 | cut -f1 -d ' ')
+
+    kpartx -a $raw_imgfile
+
+    mount /dev/mapper/$loopdevice $mountdir
+    mount -t proc none $mountdir/proc
+
+    echo $loopdevice
+
+    sudo resize2fs /dev/mapper/$loopdevice
+
+    cp $cmd $mountdir/$(basename $cmd)
+}
+
+# modify the image by running a script in a chrooted environment
+modify() {
+    # resolv.conf does not exist in base image, pass nameserver value from host
+    nameserver_ip=$(grep -m 1 '^nameserver' \
+        /etc/resolv.conf | awk '{ print $2 }')
+
+    # prevent init scripts from running during install
+    echo $'#!/bin/sh\nexit 101' >$mountdir/usr/sbin/policy-rc.d
+    chmod a+x $mountdir/usr/sbin/policy-rc.d
+
+    chroot $mountdir /$(basename $cmd) $nameserver_ip
+
+    rm -rf $mountdir/usr/sbin/policy-rc.d
+
+    umount -f $mountdir/proc
+    umount $mountdir
+
+    qemu-img convert -c -o compat=0.10 -O qcow2 $raw_imgfile $imgfile
+#    qemu-img convert -O vmdk $raw_imgfile $imgfile
+
+    if dmsetup table | grep $loopdevice; then
+       dmsetup clear $loopdevice || true
+    fi
+}
+
+# cleanup (umount) the image
+cleanup() {
+    # designed to be idempotent
+    mount | grep $mountdir/proc && umount $mountdir/proc
+    mount | grep $mountdir && umount $mountdir
+    if [ -f $raw_imgfile ]; then
+        kpartx -dv $raw_imgfile || true
+    fi
+    rm -f $raw_imgfile
+    rm -rf $mountdir
+}
+
+exitcode=""
+error_trap()
+{
+    local rc=$?
+
+    set +e
+
+    if [ -z "$exitcode" ]; then
+        exitcode=$rc
+    fi
+
+    cleanup
+
+    echo "Image build failed with $exitcode"
+
+    exit $exitcode
+}
+
+main() {
+    cleanup
+
+    trap "error_trap" EXIT SIGTERM
+
+    download
+    setup
+    modify
+    trap - EXIT SIGTERM
+    cleanup
+
+    echo "the modified image is found here: $imgfile"
+}
+
+main
diff --git a/tools/vnftest-img-lxd-modify b/tools/vnftest-img-lxd-modify
new file mode 100755
index 0000000..5b54a7c
--- /dev/null
+++ b/tools/vnftest-img-lxd-modify
@@ -0,0 +1,143 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/yardstick-img-lxd-modify
+
+# vnftest-img-lxd-modify - download and modify an Ubuntu cloud image
+#
+# The actual customization is done by a script passed with an absolute path as
+# the single argument. The command needs to be invoked with sudo.
+#
+# Example invocation:
+# vnftest-img-lxd-modify /home/vnftest/tools/ubuntu-server-cloudimg-modify.sh
+#
+# Warning: the script will create files by default in:
+#   /tmp/workspace/vnftest
+# the files will be owned by root!
+#
+# TODO: image resize is needed if the base image is too small
+#
+
+set -e
+set -x
+
+die() {
+    echo "error: $1" >&2
+    exit 1
+}
+
+test $# -eq 1 -o $# -eq 2 || die "no image specific script as argument"
+test $(id -u) -eq 0 || die "should invoke using sudo"
+
+cmd=$1
+RELEASE=$2
+test -x $cmd
+mountdir="/mnt/vnftest"
+workspace=${WORKSPACE:-"/tmp/workspace/vnftest"}
+host=${HOST:-"cloud-images.ubuntu.com"}
+release=${RELEASE:-"xenial"}
+image_path="${release}/current/${release}-server-cloudimg-amd64-root.tar.gz"
+image_url=${IMAGE_URL:-"https://${host}/${image_path}"}
+sha256sums_path="${release}/current/SHA256SUMS"
+sha256sums_url=${SHA256SUMS_URL:-"https://${host}/${sha256sums_path}"}
+
+imgfile="${workspace}/vnftest-image.tar.gz"
+filename=$(basename $image_url)
+
+# download and checksum base image, conditionally if local copy is outdated
+download() {
+    test -d $workspace || mkdir -p $workspace
+    cd $workspace
+    rm -f SHA256SUMS # always download the checksum file to detect a stale image
+    wget $sha256sums_url
+    test -e $filename || wget -nc --progress=dot:giga $image_url
+    grep $filename SHA256SUMS | sha256sum -c ||
+    if [ $? -ne 0 ]; then
+        rm $filename
+        wget -nc --progress=dot:giga $image_url
+        grep $filename SHA256SUMS | sha256sum -c
+    fi
+    cd -
+}
+
+# extract files
+setup() {
+    mkdir -p $mountdir
+
+    cp $cmd $mountdir/$(basename $cmd)
+
+    cd $workspace
+    tar zxvf $filename -C $mountdir
+}
+
+# modify the image by running a script in a chrooted environment
+modify() {
+    # resolv.conf does not exist in base image, pass nameserver value from host
+    nameserver_ip=$(grep -m 1 '^nameserver' \
+        /etc/resolv.conf | awk '{ print $2 }')
+
+    # prevent init scripts from running during install
+    echo $'#!/bin/sh\nexit 101' >$mountdir/usr/sbin/policy-rc.d
+    chmod a+x $mountdir/usr/sbin/policy-rc.d
+
+    chroot $mountdir /$(basename $cmd) $nameserver_ip
+
+    rm -rf $mountdir/usr/sbin/policy-rc.d
+
+    tar zcvf $(basename $imgfile) $mountdir/
+}
+
+# cleanup the image
+cleanup() {
+    rm -rf $mountdir
+}
+
+exitcode=""
+error_trap()
+{
+    local rc=$?
+
+    set +e
+
+    if [ -z "$exitcode" ]; then
+        exitcode=$rc
+    fi
+
+    dmesg -T | tail -50
+
+    cleanup
+
+    echo "Image build failed with $exitcode"
+
+    exit $exitcode
+}
+
+main() {
+    cleanup
+
+    trap "error_trap" EXIT SIGTERM
+
+    download
+    setup
+    modify
+
+    trap - EXIT SIGTERM
+    cleanup
+
+    echo "the modified image is found here: $imgfile"
+}
+
+main
diff --git a/tools/vnftest-img-modify b/tools/vnftest-img-modify
new file mode 100755
index 0000000..d3d1fcf
--- /dev/null
+++ b/tools/vnftest-img-modify
@@ -0,0 +1,191 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/yardstick-img-modify
+
+# vnftest-img-modify - download and modify an Ubuntu cloud image
+#
+# The actual customization is done by a script passed with an absolute path as
+# the single argument. The command needs to be invoked with sudo.
+#
+# Example invocation:
+# vnftest-img-modify /home/vnftest/tools/ubuntu-server-cloudimg-modify.sh
+#
+# Warning: the script will create files by default in:
+#   /tmp/workspace/vnftest
+# the files will be owned by root!
+#
+# TODO: image resize is needed if the base image is too small
+#
+set -e
+set -x
+
+die() {
+    echo "error: $1" >&2
+    exit 1
+}
+
+test $# -eq 1 -o $# -eq 2 || die "no image specific script as argument"
+test $(id -u) -eq 0 || die "should invoke using sudo"
+
+cmd=$1
+RELEASE=$2
+test -x $cmd
+mountdir="/mnt/vnftest"
+workspace=${WORKSPACE:-"/tmp/workspace/vnftest"}
+host=${HOST:-"cloud-images.ubuntu.com"}
+release=${RELEASE:-"xenial"}
+boot_mode="disk1"
+if [[ "${YARD_IMG_ARCH}" = "arm64" ]]; then
+    boot_mode="uefi1"
+fi
+
+image_path="${release}/current/${release}-server-cloudimg-${YARD_IMG_ARCH}-${boot_mode}.img"
+image_url=${IMAGE_URL:-"https://${host}/${image_path}"}
+sha256sums_path="${release}/current/SHA256SUMS"
+sha256sums_url=${SHA256SUMS_URL:-"https://${host}/${sha256sums_path}"}
+
+imgfile="${workspace}/vnftest-image.img"
+raw_imgfile_basename="vnftest-${release}-server.raw"
+raw_imgfile="${workspace}/${raw_imgfile_basename}"
+filename=$(basename $image_url)
+
+apt-get install -y parted
+
+# download and checksum base image, conditionally if local copy is outdated
+download() {
+    test -d $workspace || mkdir -p $workspace
+    cd $workspace
+    rm -f SHA256SUMS # always download the checksum file to detect a stale image
+    wget $sha256sums_url
+    test -e $filename || wget -nc --progress=dot:giga $image_url
+    grep $filename SHA256SUMS | sha256sum -c ||
+    if [ $? -ne 0 ]; then
+        rm $filename
+        wget -nc --progress=dot:giga $image_url
+        grep $filename SHA256SUMS | sha256sum -c
+    fi
+
+    for i in $(seq 0 9); do
+        [ -a /dev/loop$i ] || mknod -m 660 /dev/loop$i b 7 $i
+    done
+
+    qemu-img convert $filename $raw_imgfile
+    cd -
+}
+
+# mount image
+setup() {
+    # qemu-img resize $raw_imgfile +5GB
+    mkdir -p $mountdir
+
+    loopdevice=$(kpartx -l $raw_imgfile | head -1 | cut -f1 -d ' ')
+
+    kpartx -av $raw_imgfile
+
+    # for troubleshooting
+    sleep 2
+    dmsetup ls
+    parted -l /dev/${loopdevice:0:5} || true
+    mount /dev/mapper/$loopdevice $mountdir
+    mount -t proc none $mountdir/proc
+
+    cp $cmd $mountdir/$(basename $cmd)
+    if [ "${YARD_IMG_ARCH}" = "arm64" ]; then
+        cp /usr/bin/qemu-aarch64-static $mountdir/usr/bin
+    fi
+}
+
+# modify the image by running a script in a chrooted environment
+modify() {
+    # resolv.conf does not exist in base image, pass nameserver value from host
+    nameserver_ip=$(grep -m 1 '^nameserver' \
+        /etc/resolv.conf | awk '{ print $2 }')
+
+    # prevent init scripts from running during install
+    echo $'#!/bin/sh\nexit 101' >$mountdir/usr/sbin/policy-rc.d
+    chmod a+x $mountdir/usr/sbin/policy-rc.d
+
+    chroot $mountdir /$(basename $cmd) $nameserver_ip
+
+    rm -rf $mountdir/usr/sbin/policy-rc.d
+
+    umount -f $mountdir/proc
+    umount $mountdir
+
+    qemu-img convert -c -o compat=0.10 -O qcow2 $raw_imgfile $imgfile
+
+    if dmsetup table | grep $loopdevice; then
+       dmsetup clear $loopdevice || true
+    fi
+}
+
+# cleanup (umount) the image
+cleanup() {
+    # designed to be idempotent
+    mount | grep $mountdir/proc && umount $mountdir/proc
+    mount | grep $mountdir && umount $mountdir
+    mount | grep "/mnt/${release}" && umount "/mnt/${release}"
+
+    if [ -f "${raw_imgfile}" ]; then
+        # kpartx -dv $raw_imgfile sometimes fails, so check it again.
+        #if [ -z "$(kpartx -l $raw_imgfile | grep 'loop deleted')" ]; then
+        #    kpartx -dv $raw_imgfile
+        #fi
+        kpartx -dv $raw_imgfile || true
+    fi
+
+    rm -f $raw_imgfile
+    rm -rf $mountdir
+}
+
+exitcode=""
+error_trap()
+{
+    local rc=$?
+
+    set +e
+
+    if [ -z "$exitcode" ]; then
+        exitcode=$rc
+    fi
+
+    dmesg -T | tail -50
+
+    cleanup
+
+    echo "Image build failed with $exitcode"
+
+    exit $exitcode
+}
+
+main() {
+    cleanup
+
+    trap "error_trap" EXIT SIGTERM
+
+    download
+    setup
+    modify
+
+    trap - EXIT SIGTERM
+    cleanup
+
+    echo "the modified image is found here: $imgfile"
+}
+
+main
+
diff --git a/tools/vsperf-img-finalize.sh b/tools/vsperf-img-finalize.sh
new file mode 100755
index 0000000..e0126e4
--- /dev/null
+++ b/tools/vsperf-img-finalize.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/vsperf-img-finalize
+
+# PREREQUISITES
+# modified image (vnftest-vsperf) must be uploaded to OpenStack
+# must have a proper flavor (vsperf-flavor) for the image e.g.
+# nova flavor-create vsperf-flavor auto 8192 80 6
+# nova flavor-key vsperf-flavor set hw:numa_nodes=1
+# nova flavor-key vsperf-flavor set hw:mem_page_size=1GB
+# nova flavor-key vsperf-flavor set hw:cpu_policy=dedicated
+# nova flavor-key vsperf-flavor set hw:vif_multiqueue_enabled=true
+
+stackname="vsperf-install-stack"
+template=vsperf_install.yml
+new_image_name="vnftest-vsperf-server"
+
+openstack stack create $stackname -f yaml -t $template
+progress="WARMING_UP"
+
+while [ "$progress" != "CREATE_COMPLETE" ]
+do
+  sleep 10
+  echo "check stack status......."
+  show_output=$(openstack stack show $stackname)
+  progress=$(echo $show_output | sed 's/^.*stack_status . \([^ ]*\).*$/\1/')
+  echo "$progress"
+  if [ "$progress" == "CREATE_FAILED" ];then
+    echo "create $stackname failed"
+    exit 1
+  fi
+done
+
+# the instance has to be stopped before taking the snapshot
+nova stop $stackname
+sleep 10
+
+status=$(nova image-create --poll $stackname $new_image_name)
+if [[ "$status" =~ "Finished" ]];then
+  echo "$new_image_name finished"
+fi
+
+nova delete $stackname
+sleep 10
+openstack stack delete --yes $stackname
diff --git a/tools/vsperf-img-modify.sh b/tools/vsperf-img-modify.sh
new file mode 100755
index 0000000..d90adbf
--- /dev/null
+++ b/tools/vsperf-img-modify.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/vsperf-img-modify.sh
+
+# installs required packages
+# must be run from inside the image (either chrooted or running)
+
+set -ex
+
+if [ $# -eq 1 ]; then
+    nameserver_ip=$1
+
+    # /etc/resolv.conf is a symbolic link to /run, restore at end
+    rm /etc/resolv.conf
+    echo "nameserver $nameserver_ip" > /etc/resolv.conf
+    echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+    echo "nameserver 8.8.4.4" >> /etc/resolv.conf
+fi
+
+# Force apt to use ipv4 due to build problems on LF POD.
+echo 'Acquire::ForceIPv4 "true";' > /etc/apt/apt.conf.d/99force-ipv4
+
+echo 'GRUB_CMDLINE_LINUX="resume=/dev/sda1 default_hugepagesz=1G hugepagesz=1G hugepages=32 iommu=on iommu=pt intel_iommu=on"' >> /etc/default/grub
+
+# Add hostname to /etc/hosts.
+# Allow console access via pwd
+cat <<EOF >/etc/cloud/cloud.cfg.d/10_etc_hosts.cfg
+manage_etc_hosts: True
+password: ubuntu
+chpasswd: { expire: False }
+ssh_pwauth: True
+EOF
+
+linuxheadersversion=$(echo ls /boot/vmlinuz* | cut -d- -f2-)
+
+apt-get update
+apt-get install -y \
+    linux-headers-$linuxheadersversion \
+    screen \
+    locate \
+    sshpass \
+    git
+
+cd /root
+git clone -b stable/danube https://gerrit.onap.org/gerrit/vswitchperf
+
+# do not compile ovs and qemu
+sed -i.bak -e 's/^\(SUBBUILDS\ =\ src_vanilla\)/#\1/' \
+           -e 's/^\(SUBDIRS\ += ovs.*\)/#\1/' \
+           -e 's/^\(SUBDIRS\ += qemu.*\)/#\1/' \
+    vswitchperf/src/Makefile
+# If these paths do not exist, vsperf won't start
+mkdir -p /root/vswitchperf/src/ovs/ovs/ovsdb/
+touch /root/vswitchperf/src/ovs/ovs/ovsdb/ovsdb-tool
+touch /root/vswitchperf/src/ovs/ovs/ovsdb/ovsdb-server
+mkdir -p /root/vswitchperf/src/qemu/qemu/x86_64-softmmu/
+touch /root/vswitchperf/src/qemu/qemu/x86_64-softmmu/qemu-system-x86_64
+mkdir -p /root/vswitchperf/src/ovs/ovs/utilities/
+touch /root/vswitchperf/src/ovs/ovs/utilities/ovs-dpctl
+touch /root/vswitchperf/src/ovs/ovs/utilities/ovs-vsctl
+touch /root/vswitchperf/src/ovs/ovs/utilities/ovs-ofctl
+touch /root/vswitchperf/src/ovs/ovs/utilities/ovs-appctl
+mkdir -p /root/vswitchperf/src/ovs/ovs/vswitchd/
+touch /root/vswitchperf/src/ovs/ovs/vswitchd/vswitch.ovsschema
+touch /root/vswitchperf/src/ovs/ovs/vswitchd/ovs-vswitchd
+
+# restore symlink
+#ln -sf /run/resolvconf/resolv.conf /etc/resolv.conf
diff --git a/tools/vsperf_install.yml b/tools/vsperf_install.yml
new file mode 100644
index 0000000..413171a
--- /dev/null
+++ b/tools/vsperf_install.yml
@@ -0,0 +1,133 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tools/vsperf_install.yml
+
+heat_template_version: 2015-04-30
+
+description: >
+  Used to run VMs with Vsperf
+
+parameters:
+  image:
+    type: string
+    description: Name of the image
+    default: vnftest-vsperf
+
+  flavor:
+    type: string
+    default: vsperf-flavor
+
+  timeout:
+    type: number
+    description: Timeout in seconds for WaitCondition, depends on your image and environment
+    default: 6000
+
+  external_net_name:
+    type: string
+    description: Name of the external network to which the management network will connect
+    default: ext-net1
+
+resources:
+  network:
+    type: OS::Neutron::Net
+    properties:
+      name: vsperf_net
+
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name: vsperf_subnet
+      ip_version: 4
+      cidr: 192.168.0.0/24
+      network: { get_resource: network }
+
+  management_router:
+    type: OS::Neutron::Router
+    properties:
+      name: management_router
+      external_gateway_info:
+        network: { get_param: external_net_name }
+
+  management_router_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router: { get_resource: management_router }
+      subnet: { get_resource: subnet }
+
+  floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: external_net_name }
+
+  floating_ip_association:
+    type: OS::Nova::FloatingIPAssociation
+    properties:
+      floating_ip: { get_resource: floating_ip }
+      server_id: {get_resource: vsperf_vm}
+
+  keypair:
+    type: OS::Nova::KeyPair
+    properties:
+      name: vnftest-key
+      public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD0RkXfW6pksd1cZmXuvXZF/Mlqqq3ahIGcGoULOC97XMpu0vdxMpcUwdjwGqMwEXTVyfHidu0l99bLqOCpSUKCmbWx3ONJ+1kqFx4HwsKEWLiyDYqsuMrDeZT1eFjC5avCoTcrIw2wq5NaBb00lDGagNZOeopaL5YIa4+PizEY23+cir24D67NU21Fg3JE92AIeGlNa4j66L3a+lL0hZq74Dilmp42wm4GsbplRO6KJfyaraowHb1X+TmhCjBgHk6M/OJ9yPAroZyJNcwjMAuuxhAYWRuT3SdbnoUR0RG2VhfDh0qNid7vOqLbhKPeaLLFmzkN+9w3WdCp6LbSYt87 vnftest@vnftest.onap.org
+
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: { get_resource: wait_handle }
+      count: 1
+      timeout: { get_param: timeout }
+
+  vsperf_vm:
+    type: OS::Nova::Server
+    depends_on: [subnet, keypair]
+    properties:
+      name: { get_param: "OS::stack_name" }
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      key_name: {get_resource: keypair}
+      networks:
+        - network: { get_resource: network }
+      config_drive: True
+      user_data_format : RAW
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            cat <<'CEOF' > /tmp/vsperf_post_build.sh
+            echo "Install vswitchperf"
+            mv /root/vswitchperf /home/ubuntu
+            chown -R ubuntu:ubuntu /home/ubuntu/vswitchperf
+            cd /home/ubuntu/vswitchperf/systems
+            sudo -H -u ubuntu ./build_base_machine.sh
+            echo "Set password less access to MoonGen server"
+            sudo -H -u ubuntu ssh-keygen -b 2048 -t rsa -f /home/ubuntu/.ssh/id_rsa -N ''
+            sudo -H -u ubuntu touch /home/ubuntu/.cloud-warnings.skip
+            echo "Enable 1GB huge pages"
+            update-grub
+            $NOTIFY --data-binary '{"status": "SUCCESS"}'
+            CEOF
+            chmod +x /tmp/vsperf_post_build.sh
+            nohup /tmp/vsperf_post_build.sh &
+          params:
+            $NOTIFY: { get_attr: ['wait_handle', 'curl_cli'] }
+
+outputs:
+  vm_uuid:
+    description: uuid of the VM
+    value: { get_attr: [ vsperf_vm, show, id ] }
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..ae6ec72
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,11 @@
+[tox]
+envlist = py27
+
+[testenv]
+usedevelop=True
+deps =
+    -r{toxinidir}/requirements.txt
+    -r{toxinidir}/test-requirements.txt
+commands = /bin/bash {toxinidir}/tools/run_tests.sh --unit
+whitelist_externals = /bin/bash
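+# run locally with: tox -e py27 (delegates to tools/run_tests.sh --unit)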
+
diff --git a/vnftest.egg-info/PKG-INFO b/vnftest.egg-info/PKG-INFO
new file mode 100644
index 0000000..14cf284
--- /dev/null
+++ b/vnftest.egg-info/PKG-INFO
@@ -0,0 +1,11 @@
+Metadata-Version: 1.0
+Name: vnftest
+Version: 0.1.dev0
+Summary: UNKNOWN
+Home-page: https://www.onap.org
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description-Content-Type: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/vnftest.egg-info/SOURCES.txt b/vnftest.egg-info/SOURCES.txt
new file mode 100644
index 0000000..04d6f8a
--- /dev/null
+++ b/vnftest.egg-info/SOURCES.txt
@@ -0,0 +1,70 @@
+setup.py
+etc/__init__.py
+tests/__init__.py
+tests/unit/__init__.py
+tests/unit/test_yaml_loader.py
+tools/vnftest-img-dpdk-modify
+tools/vnftest-img-lxd-modify
+tools/vnftest-img-modify
+vnftest/__init__.py
+vnftest/main.py
+vnftest/ssh.py
+vnftest.egg-info/PKG-INFO
+vnftest.egg-info/SOURCES.txt
+vnftest.egg-info/dependency_links.txt
+vnftest.egg-info/entry_points.txt
+vnftest.egg-info/top_level.txt
+vnftest/cmd/NSBperf.py
+vnftest/cmd/__init__.py
+vnftest/cmd/cli.py
+vnftest/cmd/commands/__init__.py
+vnftest/cmd/commands/env.py
+vnftest/cmd/commands/plugin.py
+vnftest/cmd/commands/report.py
+vnftest/cmd/commands/runner.py
+vnftest/cmd/commands/step.py
+vnftest/cmd/commands/task.py
+vnftest/cmd/commands/testcase.py
+vnftest/common/__init__.py
+vnftest/common/constants.py
+vnftest/common/exceptions.py
+vnftest/common/html_template.py
+vnftest/common/httpClient.py
+vnftest/common/openstack_utils.py
+vnftest/common/process.py
+vnftest/common/rest_client.py
+vnftest/common/task_template.py
+vnftest/common/template_format.py
+vnftest/common/utils.py
+vnftest/common/yaml_loader.py
+vnftest/dispatcher/__init__.py
+vnftest/dispatcher/base.py
+vnftest/dispatcher/file.py
+vnftest/dispatcher/http.py
+vnftest/onap/__init__.py
+vnftest/onap/contexts/__init__.py
+vnftest/onap/contexts/base.py
+vnftest/onap/contexts/csar.py
+vnftest/onap/contexts/dummy.py
+vnftest/onap/core/__init__.py
+vnftest/onap/core/plugin.py
+vnftest/onap/core/report.py
+vnftest/onap/core/runner.py
+vnftest/onap/core/step.py
+vnftest/onap/core/task.py
+vnftest/onap/core/testcase.py
+vnftest/onap/core/testsuite.py
+vnftest/onap/runners/__init__.py
+vnftest/onap/runners/base.py
+vnftest/onap/runners/duration.py
+vnftest/onap/runners/dynamictp.py
+vnftest/onap/runners/iteration.py
+vnftest/onap/runners/search.py
+vnftest/onap/runners/sequence.py
+vnftest/onap/steps/__init__.py
+vnftest/onap/steps/base.py
+vnftest/onap/steps/onap_api_call.py
+vnftest/onap/steps/dummy/__init__.py
+vnftest/onap/steps/dummy/dummy.py
+vnftest/onap/steps/onboard/__init__.py
+vnftest/tests/__init__.py
\ No newline at end of file
diff --git a/vnftest.egg-info/dependency_links.txt b/vnftest.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/vnftest.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/vnftest.egg-info/entry_points.txt b/vnftest.egg-info/entry_points.txt
new file mode 100644
index 0000000..e1abfaf
--- /dev/null
+++ b/vnftest.egg-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+vnftest = vnftest.main:main
+
diff --git a/vnftest.egg-info/top_level.txt b/vnftest.egg-info/top_level.txt
new file mode 100644
index 0000000..f2b8f14
--- /dev/null
+++ b/vnftest.egg-info/top_level.txt
@@ -0,0 +1,3 @@
+etc
+tests
+vnftest
diff --git a/vnftest/__init__.py b/vnftest/__init__.py
new file mode 100644
index 0000000..212c153
--- /dev/null
+++ b/vnftest/__init__.py
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+from __future__ import absolute_import
+import logging
+import os
+import errno
+
+# this module must only import other modules that do
+# not require loggers to be created, so this cannot
+# include vnftest.common.utils
+from vnftest.common import constants
+
+try:
+    # do not use vnftest.common.utils.makedirs
+    # since vnftest.common.utils creates a logger
+    # and so it cannot be imported before this code
+    os.makedirs(constants.LOG_DIR)
+except OSError as e:
+    if e.errno != errno.EEXIST:
+        raise
+
+LOG_FILE = os.path.join(constants.LOG_DIR, 'vnftest.log')
+LOG_FORMATTER = '%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d %(message)s'
+
+_LOG_FORMATTER = logging.Formatter(LOG_FORMATTER)
+_LOG_STREAM_HDLR = logging.StreamHandler()
+_LOG_FILE_HDLR = logging.FileHandler(LOG_FILE)
+
+LOG = logging.getLogger(__name__)
+
+
+def _init_logging():
+
+    LOG.setLevel(logging.DEBUG)
+
+    _LOG_STREAM_HDLR.setFormatter(_LOG_FORMATTER)
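+    # console verbosity follows CI_DEBUG; the file handler always logs at DEBUG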
+    if os.environ.get('CI_DEBUG', '').lower() in {'1', 'y', "yes", "true"}:
+        _LOG_STREAM_HDLR.setLevel(logging.DEBUG)
+    else:
+        _LOG_STREAM_HDLR.setLevel(logging.INFO)
+
+    # don't append to log file, clobber
+    _LOG_FILE_HDLR.setFormatter(_LOG_FORMATTER)
+    _LOG_FILE_HDLR.setLevel(logging.DEBUG)
+
+    del logging.root.handlers[:]
+    logging.root.addHandler(_LOG_STREAM_HDLR)
+    logging.root.addHandler(_LOG_FILE_HDLR)
+    logging.debug("logging.root.handlers = %s", logging.root.handlers)
diff --git a/vnftest/cmd/NSBperf.py b/vnftest/cmd/NSBperf.py
new file mode 100755
index 0000000..40a157b
--- /dev/null
+++ b/vnftest/cmd/NSBperf.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/NSBperf.py
+
+"""NSBPERF main script.
+"""
+
+from __future__ import absolute_import
+from __future__ import print_function
+import os
+import argparse
+import json
+import subprocess
+import signal
+from oslo_serialization import jsonutils
+
+from six.moves import input
+
+CLI_PATH = os.path.dirname(os.path.realpath(__file__))
+REPO_PATH = os.path.abspath(os.path.join(CLI_PATH, os.pardir))
+
+
+def sigint_handler(*args, **kwargs):
+    """ Capture ctrl+c and exit cli """
+    subprocess.call(["pkill", "-9", "vnftest"])
+    raise SystemExit(1)
+
+
+class VnftestNSCli(object):
+    """ This class handles vnftest network serivce testing """
+
+    def __init__(self):
+        super(VnftestNSCli, self).__init__()
+
+    @classmethod
+    def validate_input(cls, choice, choice_len):
+        """ Validate user inputs """
+        if not str(choice):
+            return 1
+
+        choice = int(choice)
+        if not 1 <= choice <= choice_len:
+            print("\nInvalid wrong choice...")
+            input("Press Enter to continue...")
+            return 1
+        subprocess.call(['clear'])
+        return 0
+
+    @classmethod
+    def parse_arguments(cls):
+        """
+        Parse command line arguments.
+        """
+        parser = \
+            argparse.ArgumentParser(
+                prog=__file__,
+                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+        parser.add_argument('--version', action='version',
+                            version='%(prog)s 0.1')
+        parser.add_argument('--list', '--list-tests', action='store_true',
+                            help='list all tests and exit')
+        parser.add_argument('--list-vnfs', action='store_true',
+                            help='list all system vnfs and exit')
+
+        group = parser.add_argument_group('test selection options')
+        group.add_argument('--vnf', help='vnf to use')
+        group.add_argument('--test', help='test in use')
+
+        args = vars(parser.parse_args())
+
+        return args
+
+    @classmethod
+    def generate_kpi_results(cls, tkey, tgen):
+        """ Generate report for vnf & traffic generator kpis """
+        if tgen:
+            print("\n%s stats" % tkey)
+            print("----------------------------")
+            for key, value in tgen.items():
+                if key != "collect_stats":
+                    print(json.dumps({key: value}, indent=2))
+
+    @classmethod
+    def generate_nfvi_results(cls, nfvi):
+        """ Generate report for vnf & traffic generator kpis """
+        if nfvi:
+            nfvi_kpi = {k: v for k, v in nfvi.items() if k == 'collect_stats'}
+            if nfvi_kpi:
+                print("\nNFVi stats")
+                print("----------------------------")
+                for key, value in nfvi_kpi.items():
+                    print(json.dumps({key: value}, indent=2))
+
+    def generate_final_report(self, test_case):
+        """ Function will check if partial test results are available
+        and generates final report in rst format.
+        """
+
+        tc_name = os.path.splitext(test_case)[0]
+        report_caption = '{}\n{} ({})\n{}\n\n'.format(
+            '================================================================',
+            'Performance report for', tc_name.upper(),
+            '================================================================')
+        print(report_caption)
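+        # /tmp/vnftest.out is assumed to be the default task output file
+        # written by the file dispatcher when the task completes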
+        if os.path.isfile("/tmp/vnftest.out"):
+            lines = []
+            with open("/tmp/vnftest.out") as infile:
+                lines = jsonutils.load(infile)
+
+            if lines:
+                lines = \
+                    lines['result']["testcases"][tc_name]["tc_data"]
+                tc_res = lines.pop(len(lines) - 1)
+                for key, value in tc_res["data"].items():
+                    self.generate_kpi_results(key, value)
+                    self.generate_nfvi_results(value)
+
+    @classmethod
+    def handle_list_options(cls, args, test_path):
+        """ Process --list cli arguments if needed
+
+        :param args: A dictionary with all CLI arguments
+        """
+        if args['list_vnfs']:
+            vnfs = os.listdir(test_path)
+            print("VNF :")
+            print("================")
+            for index, vnf in enumerate(vnfs, 1):
+                print((' %-2s %s' % ('%s:' % str(index), vnf)))
+            raise SystemExit(0)
+
+        if args['list']:
+            vnfs = os.listdir(test_path)
+
+            print("Available Tests:")
+            print("*****************")
+            for vnf in vnfs:
+                testcases = os.listdir(test_path + vnf)
+                print(("VNF :(%s)" % vnf))
+                print("================")
+                test_cases = [tc for tc in testcases if "tc_" in tc and "template" not in tc]
+
+                print("\tBareMetal Testcase:")
+                print("\t===================")
+                for testcase in [tc for tc in test_cases if "baremetal" in tc]:
+                    print("\t%s" % testcase)
+
+                print(os.linesep)
+                print("\tStandalone Virtualization Testcase:")
+                print("\t===================================")
+                for testcase in [tc for tc in test_cases if "ovs" in tc or "sriov" in tc]:
+                    print("\t%s" % testcase)
+
+                print(os.linesep)
+                print("\tOpenstack Testcase:")
+                print("\t===================")
+                for testcase in [tc for tc in test_cases if "heat" in tc]:
+                    print("\t%s" % testcase)
+                print(os.linesep)
+            raise SystemExit(0)
+
+    @classmethod
+    def terminate_if_less_options(cls, args):
+        """ terminate cli if cmdline options is invalid """
+        if not (args["vnf"] and args["test"]):
+            print("CLI needs option, make sure to pass vnf, test")
+            print("eg: NSBperf.py --vnf <vnf untertest> --test <test yaml>")
+            raise SystemExit(1)
+
+    def run_test(self, args, test_path):
+        """ run requested test """
+        try:
+            vnf = args.get("vnf", "")
+            test = args.get("test", "")
+
+            vnf_dir = test_path + os.sep + vnf
+            if not os.path.exists(vnf_dir):
+                raise ValueError("'%s', vnf not supported." % vnf)
+
+            testcases = [tc for tc in os.listdir(vnf_dir) if "tc" in tc]
+            subtest = set([test]).issubset(testcases)
+            if not subtest:
+                raise ValueError("'%s', testcase not supported." % test)
+
+            os.chdir(vnf_dir)
+            # fixme: Use REST APIs to initiate testcases
+            subprocess.check_output(["vnftest", "--debug",
+                                     "task", "start", test])
+            self.generate_final_report(test)
+        except (IOError, ValueError) as err:
+            print("Value/IO error: %s" % err)
+        except Exception:
+            print("Test failed. Please verify the test inputs and re-run the test.")
+            print("e.g.: NSBperf.py --vnf <vnf under test> --test <test yaml>")
+
+    def main(self):
+        """Main function.
+        """
+        test_path = os.path.join(REPO_PATH, "../samples/vnf_samples/nsut/")
+        os.chdir(os.path.join(REPO_PATH, "../"))
+        args = self.parse_arguments()
+
+        # if required, handle list-* operations
+        self.handle_list_options(args, test_path)
+
+        # check for input params
+        self.terminate_if_less_options(args)
+
+        # run test
+        self.run_test(args, test_path)
+
+if __name__ == "__main__":
+    signal.signal(signal.SIGINT, sigint_handler)
+    NS_CLI = VnftestNSCli()
+    NS_CLI.main()
diff --git a/vnftest/cmd/__init__.py b/vnftest/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/cmd/__init__.py
diff --git a/vnftest/cmd/cli.py b/vnftest/cmd/cli.py
new file mode 100644
index 0000000..9bffe56
--- /dev/null
+++ b/vnftest/cmd/cli.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/cli.py
+"""
+Command-line interface to vnftest
+"""
+
+from __future__ import absolute_import
+import logging
+import os
+import sys
+
+from pkg_resources import get_distribution
+from argparse import RawDescriptionHelpFormatter
+from oslo_config import cfg
+
+from vnftest import _init_logging, _LOG_STREAM_HDLR
+from vnftest.cmd.commands import task
+from vnftest.cmd.commands import runner
+from vnftest.cmd.commands import step
+from vnftest.cmd.commands import testcase
+from vnftest.cmd.commands import plugin
+from vnftest.cmd.commands import env
+from vnftest.cmd.commands import report
+
+CONF = cfg.CONF
+cli_opts = [
+    cfg.BoolOpt('debug',
+                short='d',
+                default=False,
+                help='increase output verbosity to debug')
+]
+CONF.register_cli_opts(cli_opts)
+
+CONFIG_SEARCH_PATHS = [sys.prefix + "/etc/vnftest",
+                       "~/.vnftest",
+                       "/etc/vnftest"]
+
+
+def find_config_files(path_list):
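+    """Return the first vnftest.conf found on the search path, as a
+    one-element list, or None if no config file exists."""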
+    for path in path_list:
+        abspath = os.path.abspath(os.path.expanduser(path))
+        confname = abspath + "/vnftest.conf"
+        if os.path.isfile(confname):
+            return [confname]
+
+    return None
+
+
+class VnftestCLI(object):   # pragma: no cover
+    """Command-line interface to vnftest"""
+
+    # Command categories
+    categories = {
+        'task': task.TaskCommands,
+        'runner': runner.RunnerCommands,
+        'step': step.StepCommands,
+        'testcase': testcase.TestcaseCommands,
+        'plugin': plugin.PluginCommands,
+        'env': env.EnvCommand,
+        'report': report.ReportCommands
+    }
+
+    def __init__(self):
+        self.opts = []
+        self._version = 'vnftest version %s ' % \
+            get_distribution('vnftest').version
+
+    def _find_actions(self, subparsers, actions_module):
+        """find action methods"""
+        # Find action methods inside actions_module and
+        # add them to the command parser.
+        # The 'actions_module' argument may be a class
+        # or module. Action methods start with 'do_'
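+        # (illustrative note: a method such as TaskCommands.do_start is
+        # exposed as the sub-command "start" of the "task" category, i.e.
+        # "vnftest task start <inputfile>")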
+        for attr in (a for a in dir(actions_module) if a.startswith('do_')):
+            command = attr[3:].replace('_', '-')
+            callback = getattr(actions_module, attr)
+            desc = callback.__doc__ or ''
+            arguments = getattr(callback, 'arguments', [])
+            subparser = subparsers.add_parser(
+                command,
+                description=desc
+            )
+            for (args, kwargs) in arguments:
+                subparser.add_argument(*args, **kwargs)
+            subparser.set_defaults(func=callback)
+
+    def _add_command_parsers(self, categories, subparsers):
+        """add commands to command-line parser"""
+        for category in categories:
+            command_object = categories[category]()
+            desc = command_object.__doc__ or ''
+            subparser = subparsers.add_parser(
+                category, description=desc,
+                formatter_class=RawDescriptionHelpFormatter
+            )
+            subparser.set_defaults(command_object=command_object)
+            cmd_subparsers = subparser.add_subparsers(title='subcommands')
+            self._find_actions(cmd_subparsers, command_object)
+
+    def _register_cli_opt(self):
+
+        # register subcommands to parse additional command line arguments
+        def parser(subparsers):
+            self._add_command_parsers(VnftestCLI.categories, subparsers)
+
+        category_opt = cfg.SubCommandOpt("category",
+                                         title="Command categories",
+                                         help="Available categories",
+                                         handler=parser)
+        self._register_opt(category_opt)
+
+    def _register_opt(self, opt):
+
+        CONF.register_cli_opt(opt)
+        self.opts.append(opt)
+
+    def _load_cli_config(self, argv):
+
+        # load CLI args and config files
+        CONF(argv, project="vnftest", version=self._version,
+             default_config_files=find_config_files(CONFIG_SEARCH_PATHS))
+
+    def _handle_global_opts(self):
+
+        _init_logging()
+        if CONF.debug:
+            _LOG_STREAM_HDLR.setLevel(logging.DEBUG)
+
+    def _dispatch_func_notask(self):
+
+        # dispatch to category parser
+        func = CONF.category.func
+        func(CONF.category)
+
+    def _dispatch_func_task(self, task_id):
+
+        # dispatch to category parser
+        func = CONF.category.func
+        func(CONF.category, task_id=task_id)
+
+    def _clear_config_opts(self):
+
+        CONF.clear()
+        CONF.unregister_opts(self.opts)
+
+    def main(self, argv):    # pragma: no cover
+        """run the command line interface"""
+        try:
+            self._register_cli_opt()
+
+            self._load_cli_config(argv)
+
+            self._handle_global_opts()
+
+            self._dispatch_func_notask()
+        finally:
+            self._clear_config_opts()
+
+    def api(self, argv, task_id):    # pragma: no cover
+        """run the api interface"""
+        try:
+            self._register_cli_opt()
+
+            self._load_cli_config(argv)
+
+            self._handle_global_opts()
+
+            self._dispatch_func_task(task_id)
+        finally:
+            self._clear_config_opts()
diff --git a/vnftest/cmd/commands/__init__.py b/vnftest/cmd/commands/__init__.py
new file mode 100644
index 0000000..8c46b47
--- /dev/null
+++ b/vnftest/cmd/commands/__init__.py
@@ -0,0 +1,17 @@
+from __future__ import absolute_import
+from vnftest.onap.core import Param
+
+
+def change_osloobj_to_paras(args):
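+    """Copy the attributes known to Param from the oslo/argparse namespace
+    object onto a fresh Param instance."""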
+    param = Param({})
+    for k in vars(param):
+        if hasattr(args, k):
+            setattr(param, k, getattr(args, k))
+    return param
+
+
+class Commands(object):
+
+    def _change_to_dict(self, args):
+        p = Param({})
+        return {k: getattr(args, k) for k in vars(p) if hasattr(args, k)}
diff --git a/vnftest/cmd/commands/env.py b/vnftest/cmd/commands/env.py
new file mode 100644
index 0000000..55f0ebb
--- /dev/null
+++ b/vnftest/cmd/commands/env.py
@@ -0,0 +1,95 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/env.py
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import sys
+import time
+
+from six.moves import range
+
+from vnftest.common import constants as consts
+from vnftest.common.httpClient import HttpClient
+
+
+class EnvCommand(object):
+    """
+
+        Set of commands to prepare environment
+    """
+
+    def do_influxdb(self, args):
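+        """Create an InfluxDB instance through the env action API."""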
+        data = {'action': 'create_influxdb'}
+        task_id = self._start_async_task(data)
+
+        start = '* creating influxDB'
+        self._check_status(task_id, start)
+
+    def do_grafana(self, args):
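+        """Create a Grafana instance through the env action API."""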
+        data = {'action': 'create_grafana'}
+        task_id = self._start_async_task(data)
+
+        start = '* creating grafana'
+        self._check_status(task_id, start)
+
+    def do_prepare(self, args):
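+        """Prepare the vnftest environment through the env action API."""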
+        data = {'action': 'prepare_env'}
+        task_id = self._start_async_task(data)
+
+        start = '* preparing vnftest environment'
+        self._check_status(task_id, start)
+
+    def _start_async_task(self, data):
+        url = consts.ENV_ACTION_API
+        return HttpClient().post(url, data)['result']['task_id']
+
+    def _check_status(self, task_id, start):
+        self._print_status(start, '[]\r')
+        url = '{}?task_id={}'.format(consts.ASYNC_TASK_API, task_id)
+
+        CHECK_STATUS_RETRY = 20
+        CHECK_STATUS_DELAY = 5
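+        # the sleep below grows linearly with each retry (5s, 10s, 15s, ...),
+        # so the task gets several minutes to finish before we report Timeout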
+
+        for retry in range(CHECK_STATUS_RETRY):
+            response = HttpClient().get(url)
+            status = response['status']
+
+            if status:
+                break
+
+            # wait until the async task finished
+            time.sleep(CHECK_STATUS_DELAY * (retry + 1))
+
+        switcher = {
+            0: 'Timeout',
+            1: 'Finished',
+            2: 'Error'
+        }
+        self._print_status(start, '[{}]'.format(switcher[status]))
+        if status == 2:
+            print(response['result'])
+            sys.stdout.flush()
+        return status
+
+    def _print_status(self, s, e):
+        try:
+            columns = int(os.popen('stty size', 'r').read().split()[1])
+            word = '{}{}{}'.format(s, ' ' * (columns - len(s) - len(e)), e)
+            sys.stdout.write(word)
+            sys.stdout.flush()
+        except IndexError:
+            pass
diff --git a/vnftest/cmd/commands/plugin.py b/vnftest/cmd/commands/plugin.py
new file mode 100644
index 0000000..e05130a
--- /dev/null
+++ b/vnftest/cmd/commands/plugin.py
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/plugin.py
+""" Handler for vnftest command 'plugin' """
+
+from __future__ import print_function
+
+from __future__ import absolute_import
+from vnftest.onap.core.plugin import Plugin
+from vnftest.common.utils import cliargs
+from vnftest.cmd.commands import change_osloobj_to_paras
+
+
+class PluginCommands(object):   # pragma: no cover
+    """Plugin commands.
+
+       Set of commands to manage plugins.
+    """
+
+    @cliargs("input_file", type=str, help="path to plugin configuration file",
+             nargs=1)
+    def do_install(self, args):
+        """Install a plugin."""
+        param = change_osloobj_to_paras(args)
+        Plugin().install(param)
+
+    @cliargs("input_file", type=str, help="path to plugin configuration file",
+             nargs=1)
+    def do_remove(self, args):
+        """Remove a plugin."""
+        param = change_osloobj_to_paras(args)
+        Plugin().remove(param)
diff --git a/vnftest/cmd/commands/report.py b/vnftest/cmd/commands/report.py
new file mode 100644
index 0000000..05b9249
--- /dev/null
+++ b/vnftest/cmd/commands/report.py
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright (c) 2017 Rajesh Kudaka.
+#
+# Author: Rajesh Kudaka (4k.rajesh@gmail.com)
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/report.py
+""" Handler for vnftest command 'report' """
+
+from __future__ import print_function
+
+from __future__ import absolute_import
+
+from vnftest.onap.core.report import Report
+from vnftest.cmd.commands import change_osloobj_to_paras
+from vnftest.common.utils import cliargs
+
+
+class ReportCommands(object):   # pragma: no cover
+    """Report commands.
+
+    Set of commands to generate reports.
+    """
+
+    @cliargs("task_id", type=str, help=" task id", nargs=1)
+    @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+    def do_generate(self, args):
+        """Start a benchmark step."""
+        param = change_osloobj_to_paras(args)
+        Report().generate(param)
diff --git a/vnftest/cmd/commands/runner.py b/vnftest/cmd/commands/runner.py
new file mode 100644
index 0000000..557f58f
--- /dev/null
+++ b/vnftest/cmd/commands/runner.py
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/runner.py
+""" Handler for vnftest command 'runner' """
+
+from __future__ import print_function
+
+from __future__ import absolute_import
+from vnftest.onap.core.runner import Runners
+from vnftest.common.utils import cliargs
+from vnftest.cmd.commands import change_osloobj_to_paras
+
+
+class RunnerCommands(object):   # pragma: no cover
+    """Runner commands.
+
+       Set of commands to discover and display runner types.
+    """
+
+    def do_list(self, args):
+        """List existing runner types"""
+        param = change_osloobj_to_paras(args)
+        Runners().list_all(param)
+
+    @cliargs("type", type=str, help="runner type", nargs=1)
+    def do_show(self, args):
+        """Show details of a specific runner type"""
+        param = change_osloobj_to_paras(args)
+        Runners().show(param)
diff --git a/vnftest/cmd/commands/step.py b/vnftest/cmd/commands/step.py
new file mode 100644
index 0000000..10ae913
--- /dev/null
+++ b/vnftest/cmd/commands/step.py
@@ -0,0 +1,40 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/step.py
+""" Handler for vnftest command 'step' """
+
+from __future__ import print_function
+from __future__ import absolute_import
+from vnftest.onap.core.step import Steps
+from vnftest.common.utils import cliargs
+from vnftest.cmd.commands import change_osloobj_to_paras
+
+
+class StepCommands(object):     # pragma: no cover
+    """Step commands.
+
+       Set of commands to discover and display step types.
+    """
+
+    def do_list(self, args):
+        """List existing step types"""
+        param = change_osloobj_to_paras(args)
+        Steps().list_all(param)
+
+    @cliargs("type", type=str, help="runner type", nargs=1)
+    def do_show(self, args):
+        """Show details of a specific step type"""
+        param = change_osloobj_to_paras(args)
+        Steps().show(param)
diff --git a/vnftest/cmd/commands/task.py b/vnftest/cmd/commands/task.py
new file mode 100644
index 0000000..cc77bb8
--- /dev/null
+++ b/vnftest/cmd/commands/task.py
@@ -0,0 +1,70 @@
+#############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/task.py
+""" Handler for vnftest command 'task' """
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from vnftest.onap.core.task import Task
+from vnftest.common.utils import cliargs
+from vnftest.common.utils import write_json_to_file
+from vnftest.cmd.commands import change_osloobj_to_paras
+
+output_file_default = "/tmp/vnftest.out"
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TaskCommands(object):     # pragma: no cover
+    """Task commands.
+
+       Set of commands to manage benchmark tasks.
+       """
+
+    @cliargs("inputfile", type=str, help="path to task or suite file", nargs=1)
+    @cliargs("--task-args", dest="task_args",
+             help="Input task args (dict in json). These args are used"
+             "to render input task that is jinja2 template.")
+    @cliargs("--task-args-file", dest="task_args_file",
+             help="Path to the file with input task args (dict in "
+             "json/yaml). These args are used to render input"
+             "task that is jinja2 template.")
+    @cliargs("--keep-deploy", help="keep context deployed in cloud",
+             action="store_true")
+    @cliargs("--parse-only", help="parse the config file and exit",
+             action="store_true")
+    @cliargs("--output-file", help="file where output is stored, default %s" %
+             output_file_default, default=output_file_default)
+    @cliargs("--suite", help="process test suite file instead of a task file",
+             action="store_true")
+    def do_start(self, args, **kwargs):
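+        """Start a benchmark task defined by a task or suite file."""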
+        param = change_osloobj_to_paras(args)
+        self.output_file = param.output_file
+
+        result = {}
+        LOG.info('Task START')
+        try:
+            result = Task().start(param, **kwargs)
+        except Exception as e:
+            self._write_error_data(e)
+            LOG.exception("")
+
+        if result.get('result', {}).get('criteria') == 'PASS':
+            LOG.info('Task SUCCESS')
+        else:
+            LOG.info('Task FAILED')
+            raise RuntimeError('Task Failed')
+
+    def _write_error_data(self, error):
+        data = {'status': 2, 'result': str(error)}
+        write_json_to_file(self.output_file, data)
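+
+# Illustrative CLI usage (added note, mirroring the flags declared above):
+#   vnftest task start --output-file /tmp/vnftest.out <task_file.yaml>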
diff --git a/vnftest/cmd/commands/testcase.py b/vnftest/cmd/commands/testcase.py
new file mode 100644
index 0000000..518df2d
--- /dev/null
+++ b/vnftest/cmd/commands/testcase.py
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/testcase.py
+""" Handler for vnftest command 'testcase' """
+from __future__ import absolute_import
+
+import prettytable
+
+from vnftest.onap.core.testcase import Testcase
+from vnftest.common.utils import cliargs
+from vnftest.cmd.commands import change_osloobj_to_paras
+from vnftest.cmd.commands import Commands
+
+
+class TestcaseCommands(Commands):
+    """Testcase commands.
+
+       Set of commands to discover and display test cases.
+    """
+
+    def do_list(self, *args):
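+        """List existing test cases."""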
+        testcase_list = ""
+        self._format_print(testcase_list)
+
+    @cliargs("casename", type=str, help="test case name", nargs=1)
+    def do_show(self, args):
+        """Show details of a specific test case"""
+        param = change_osloobj_to_paras(args)
+        Testcase().show(param)
+
+    def _format_print(self, testcase_list):
+        """format output"""
+        case_table = prettytable.PrettyTable(['Testcase Name', 'Description'])
+        case_table.align = 'l'
+        for testcase_record in testcase_list:
+            case_table.add_row([testcase_record['Name'], testcase_record['Description']])
+        print(case_table)
diff --git a/vnftest/common/__init__.py b/vnftest/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/common/__init__.py
diff --git a/vnftest/common/constants.py b/vnftest/common/constants.py
new file mode 100644
index 0000000..9da64ba
--- /dev/null
+++ b/vnftest/common/constants.py
@@ -0,0 +1,147 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/constants.py
+from __future__ import absolute_import
+import os
+import errno
+
+from functools import reduce
+
+import pkg_resources
+
+# this module must only import other modules that do
+# not require loggers to be created, so this cannot
+# include vnftest.common.utils
+from vnftest.common.yaml_loader import yaml_load
+
+dirname = os.path.dirname
+abspath = os.path.abspath
+join = os.path.join
+sep = os.path.sep
+
+CONF = {}
+CONF_FILE = None
+VNFTEST_ROOT_PATH = dirname(
+    dirname(abspath(pkg_resources.resource_filename(__name__, "")))) + sep
+
+
+def get_param(key, default=''):
+    # we have to defer this to runtime so that we can mock os.environ.get in unittests
+    default_path = os.path.join(VNFTEST_ROOT_PATH, "etc/vnftest/vnftest.yaml")
+    conf_file = os.environ.get('CONF_FILE', default_path)
+    # don't re-parse yaml for each lookup
+    if not CONF:
+        # do not use vnftest.common.utils.parse_yaml
+        # since vnftest.common.utils creates a logger
+        # and so it cannot be imported before this code
+        try:
+            with open(conf_file) as f:
+                value = yaml_load(f)
+        except (IOError, OSError) as e:
+            # a missing config file is fine, anything else is a real error
+            if e.errno != errno.ENOENT:
+                raise
+        else:
+            CONF.update(value)
+    try:
+        return reduce(lambda a, b: a[b], key.split('.'), CONF)
+    except KeyError:
+        if not default:
+            raise
+        return default
+
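+# Illustrative use of get_param (added note, values are placeholders): with a
+# vnftest.yaml containing
+#   api:
+#     server_ip: 10.1.2.3
+# get_param('api.server_ip') walks CONF with reduce() and returns '10.1.2.3',
+# while get_param('missing.key', 'fallback') returns 'fallback' on KeyError.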
+
+try:
+    SERVER_IP = get_param('api.server_ip')
+except KeyError:
+    try:
+        from pyroute2 import IPDB
+    except ImportError:
+        SERVER_IP = '172.17.0.1'
+    else:
+        with IPDB() as ip:
+            try:
+                SERVER_IP = ip.routes['default'].gateway
+            except KeyError:
+                # during unittests ip.routes['default'] can be invalid
+                SERVER_IP = '127.0.0.1'
+
+if not SERVER_IP:
+    SERVER_IP = '127.0.0.1'
+
+
+# dir
+CONF_DIR = get_param('dir.conf', join(VNFTEST_ROOT_PATH, 'etc/vnftest'))
+CONF_FILE = join(CONF_DIR, 'vnftest.conf')
+REPOS_DIR = get_param('dir.repos', join(VNFTEST_ROOT_PATH, 'home/onap/repos/vnftest'))
+LOG_DIR = get_param('dir.log', join(VNFTEST_ROOT_PATH, 'tmp/vnftest/'))
+
+TASK_LOG_DIR = get_param('dir.tasklog', join(VNFTEST_ROOT_PATH, 'var/log/vnftest/'))
+CONF_SAMPLE_DIR = join(REPOS_DIR, 'etc/vnftest/')
+SAMPLE_CASE_DIR = join(REPOS_DIR, 'samples')
+TESTCASE_DIR = join(VNFTEST_ROOT_PATH, 'tests/onap/test_cases/')
+TESTSUITE_DIR = join(VNFTEST_ROOT_PATH, 'tests/onap/test_suites/')
+
+# file
+DEFAULT_OUTPUT_FILE = get_param('file.output_file', join(VNFTEST_ROOT_PATH, 'tmp/vnftest.out'))
+DEFAULT_HTML_FILE = get_param('file.html_file', join(VNFTEST_ROOT_PATH, 'tmp/vnftest.htm'))
+REPORTING_FILE = get_param('file.reporting_file', join(VNFTEST_ROOT_PATH, 'tmp/report.html'))
+
+# components
+AAI_IP = get_param('component.aai_ip')
+AAI_PORT = get_param('component.aai_port')
+AAI_SSL_PORT = get_param('component.aai_ssl_port')
+MSO_IP = get_param('component.mso_ip')
+SDC_IP = get_param('component.sdc_ip')
+SDC_PORT = get_param('component.sdc_port')
+SDC_CATALOG_PORT = get_param('component.sdc_catalog_port')
+SDC_DESIGNER_USER = get_param('component.sdc_designer_user')
+SDC_TESTER_USER = get_param('component.sdc_tester_user')
+SDC_GOVERNANCE_USER = get_param('component.sdc_governance_user')
+SDC_OPERATIONS_USER = get_param('component.sdc_operations_user')
+
+component_constants = {
+    'aai_ip': AAI_IP,
+    'aai_port': AAI_PORT,
+    'aai_ssl_port': AAI_SSL_PORT,
+    'mso_ip': MSO_IP,
+    'sdc_ip': SDC_IP,
+    'sdc_port': SDC_PORT,
+    'sdc_catalog_port': SDC_CATALOG_PORT,
+    'sdc_designer_user': SDC_DESIGNER_USER,
+    'sdc_tester_user': SDC_TESTER_USER,
+    'sdc_governance_user': SDC_GOVERNANCE_USER,
+    'sdc_operations_user': SDC_OPERATIONS_USER,
+}
+
+
+# api
+API_PORT = 5000
+DOCKER_URL = 'unix://var/run/docker.sock'
+SQLITE = 'sqlite:////tmp/vnftest.db'
+
+API_SUCCESS = 1
+API_ERROR = 2
+TASK_NOT_DONE = 0
+TASK_DONE = 1
+TASK_FAILED = 2
+
+BASE_URL = 'http://localhost:5000'
+ENV_ACTION_API = BASE_URL + '/vnftest/env/action'
+ASYNC_TASK_API = BASE_URL + '/vnftest/asynctask'
+
+# general
+TESTCASE_PRE = 'onap_vnftest_'
+TESTSUITE_PRE = 'onap_'
diff --git a/vnftest/common/exceptions.py b/vnftest/common/exceptions.py
new file mode 100644
index 0000000..6273cd3
--- /dev/null
+++ b/vnftest/common/exceptions.py
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/exceptions.py
+
+from oslo_utils import excutils
+
+
+class ProcessExecutionError(RuntimeError):
+    def __init__(self, message, returncode):
+        super(ProcessExecutionError, self).__init__(message)
+        self.returncode = returncode
+
+
+class VnftestException(Exception):
+    """Base Vnftest Exception.
+
+    To correctly use this class, inherit from it and define
+    a 'message' property. That message will get printf'd
+    with the keyword arguments provided to the constructor.
+
+    Based on NeutronException class.
+    """
+    message = "An unknown exception occurred."
+
+    def __init__(self, **kwargs):
+        try:
+            super(VnftestException, self).__init__(self.message % kwargs)
+            self.msg = self.message % kwargs
+        except Exception:  # pylint: disable=broad-except
+            with excutils.save_and_reraise_exception() as ctxt:
+                if not self.use_fatal_exceptions():
+                    ctxt.reraise = False
+                    # at least get the core message out if something happened
+                    super(VnftestException, self).__init__(self.message)
+
+    def __str__(self):
+        return self.msg
+
+    def use_fatal_exceptions(self):
+        """Is the instance using fatal exceptions.
+
+        :returns: Always returns False.
+        """
+        return False
+
+
+class FunctionNotImplemented(VnftestException):
+    message = ('The function "%(function_name)s" is not implemented in '
+               '"%(class_name)" class.')
diff --git a/vnftest/common/html_template.py b/vnftest/common/html_template.py
new file mode 100644
index 0000000..572d47f
--- /dev/null
+++ b/vnftest/common/html_template.py
@@ -0,0 +1,195 @@
+#############################################################################
+# Copyright (c) 2017 Rajesh Kudaka
+#
+# Author: Rajesh Kudaka 4k.rajesh@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/html_template.py
+
+template = """
+<html>
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\
+/css/bootstrap.min.css">
+<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1\
+/jquery.min.js"></script>
+<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\
+/js/bootstrap.min.js"></script>
+<script src="https://code.highcharts.com/highcharts.js"></script>
+<script src="jquery.min.js"></script>
+<script src="highcharts.js"></script>
+</head>
+<body>
+<style>
+
+table{
+  overflow-y: scroll;
+  height: 360px;
+  display: block;
+  }
+
+ header,h3{
+    font-family:Frutiger;
+    clear: left;
+    text-align: center;
+}
+</style>
+<header class="jumbotron text-center">
+  <h1>Vnftest User Interface</h1>
+  <h4>Report of {{task_id}} Generated</h4>
+</header>
+
+<div class="container">
+  <div class="row">
+    <div class="col-md-4">
+        <div class="table-responsive" >
+        <table  class="table table-hover" > </table>
+        </div>
+    </div>
+    <div class="col-md-8" >
+    <div id="container" ></div>
+   </div>
+  </div>
+</div>
+<script>
+  var arr, tab, th, tr, td, tn, row, col, thead, tbody;
+  arr={{table|safe}}
+  tab = document.getElementsByTagName('table')[0];
+  thead=document.createElement('thead');
+  tr = document.createElement('tr');
+  for(row=0;row<Object.keys(arr).length;row++)
+  {
+      th = document.createElement('th');
+      tn = document.createTextNode(Object.keys(arr).sort()[row]);
+      th.appendChild(tn);
+      tr.appendChild(th);
+          thead.appendChild(tr);
+  }
+  tab.appendChild(thead);
+  tbody=document.createElement('tbody');
+
+  for (col = 0; col < arr[Object.keys(arr)[0]].length; col++){
+  tr = document.createElement('tr');
+  for(row=0;row<Object.keys(arr).length;row++)
+  {
+      td = document.createElement('td');
+      tn = document.createTextNode(arr[Object.keys(arr).sort()[row]][col]);
+      td.appendChild(tn);
+      tr.appendChild(td);
+  }
+    tbody.appendChild(tr);
+        }
+tab.appendChild(tbody);
+
+</script>
+
+<script language="JavaScript">
+
+$(function() {
+  $('#container').highcharts({
+    title: {
+      text: 'Vnftest test results',
+      x: -20 //center
+    },
+    subtitle: {
+      text: 'Report of {{task_id}} Task Generated',
+      x: -20
+    },
+    xAxis: {
+      title: {
+        text: 'Timestamp'
+      },
+      categories:{{Timestamp|safe}}
+    },
+    yAxis: {
+
+      plotLines: [{
+        value: 0,
+        width: 1,
+        color: '#808080'
+      }]
+    },
+    tooltip: {
+      valueSuffix: ''
+    },
+    legend: {
+      layout: 'vertical',
+      align: 'right',
+      verticalAlign: 'middle',
+      borderWidth: 0
+    },
+    series: {{series|safe}}
+  });
+});
+
+</script>
+
+
+</body>
+</html>"""
+
+report_template = """
+<html>
+    <head>
+        <title>Vnftest Report</title>
+        <link href="http://cdn.static.runoob.com/libs/bootstrap/3.3.7/css\
+/bootstrap.min.css" rel="stylesheet">
+    </head>
+    <div class="content">
+        <h3>Vnftest Report </h3>
+        <hr/>
+        <div>
+
+            <div>Task ID : {{result.task_id}} </div>
+            <div style="margin-top:5px;">Criteria :
+                <font> {{result.criteria}}</font>
+            </div>
+            <hr/>
+
+            <caption>Information</caption>
+            <table class="table table-striped">
+                <tr>
+                    <th>#</th>
+                    <th>key</th>
+                    <th>value</th>
+                </tr>
+                <tbody>
+                    {% for key, value in result.info.items() %}
+                    <tr>
+                        <td>{{ loop.index }}</td>
+                        <td>{{key}}</td>
+                        <td>{{value}}</td>
+                    </tr>
+                    {% endfor %}
+                </tbody>
+            </table>
+            <hr/>
+
+            <caption>Test Cases</caption>
+            <table class="table table-striped">
+                <tr>
+                    <th>#</th>
+                    <th>key</th>
+                    <th>value</th>
+                </tr>
+                <tbody>
+                    {% for key, value in result.testcases.items() %}
+                    <tr>
+                        <td>{{ loop.index }}</td>
+                        <td>{{key}}</td>
+                        <td>{{value.criteria}}</td>
+                    </tr>
+                    {% endfor %}
+                </tbody>
+            </table>
+
+        </div>
+    </div>
+</html>
+"""
diff --git a/vnftest/common/httpClient.py b/vnftest/common/httpClient.py
new file mode 100644
index 0000000..e2c7937
--- /dev/null
+++ b/vnftest/common/httpClient.py
@@ -0,0 +1,48 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/httpClient.py
+
+from __future__ import absolute_import
+
+import logging
+import time
+
+from oslo_serialization import jsonutils
+import requests
+
+logger = logging.getLogger(__name__)
+
+
+class HttpClient(object):
+
+    def post(self, url, data, timeout=0):
+        data = jsonutils.dump_as_bytes(data)
+        headers = {'Content-Type': 'application/json'}
+        t_end = time.time() + timeout
+        while True:
+            try:
+                response = requests.post(url, data=data, headers=headers)
+                result = response.json()
+                logger.debug('The result is: %s', result)
+                return result
+            except Exception:
+                if time.time() > t_end:
+                    logger.exception('')
+                    raise
+            time.sleep(1)
+
+    def get(self, url):
+        response = requests.get(url)
+        return response.json()
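+
+# Illustrative usage (added note): the env commands post JSON this way, e.g.
+#   HttpClient().post(consts.ENV_ACTION_API, {'action': 'prepare_env'}, timeout=10)
+# which retries once per second until the POST succeeds or `timeout` seconds elapse.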
diff --git a/vnftest/common/openstack_utils.py b/vnftest/common/openstack_utils.py
new file mode 100644
index 0000000..954df2e
--- /dev/null
+++ b/vnftest/common/openstack_utils.py
@@ -0,0 +1,765 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/openstack_utils.py
+
+from __future__ import absolute_import
+
+import os
+import time
+import sys
+import logging
+
+from keystoneauth1 import loading
+from keystoneauth1 import session
+from cinderclient import client as cinderclient
+from novaclient import client as novaclient
+from glanceclient import client as glanceclient
+from neutronclient.neutron import client as neutronclient
+
+log = logging.getLogger(__name__)
+
+DEFAULT_HEAT_API_VERSION = '1'
+DEFAULT_API_VERSION = '2'
+
+
+# *********************************************
+#   CREDENTIALS
+# *********************************************
+def get_credentials():
+    """Returns a creds dictionary filled with parsed from env"""
+    creds = {}
+
+    keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
+
+    if keystone_api_version is None or keystone_api_version == '2':
+        keystone_v3 = False
+        tenant_env = 'OS_TENANT_NAME'
+        tenant = 'tenant_name'
+    else:
+        keystone_v3 = True
+        tenant_env = 'OS_PROJECT_NAME'
+        tenant = 'project_name'
+
+    # The most common way to pass this information to the script is
+    # through environment variables.
+    creds.update({
+        "username": os.environ.get("OS_USERNAME"),
+        "password": os.environ.get("OS_PASSWORD"),
+        "auth_url": os.environ.get("OS_AUTH_URL"),
+        tenant: os.environ.get(tenant_env)
+    })
+
+    if keystone_v3:
+        if os.getenv('OS_USER_DOMAIN_NAME') is not None:
+            creds.update({
+                "user_domain_name": os.getenv('OS_USER_DOMAIN_NAME')
+            })
+        if os.getenv('OS_PROJECT_DOMAIN_NAME') is not None:
+            creds.update({
+                "project_domain_name": os.getenv('OS_PROJECT_DOMAIN_NAME')
+            })
+
+    return creds
+
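+# Illustrative environment for get_credentials()/get_session() (added note,
+# values are placeholders):
+#   export OS_IDENTITY_API_VERSION=3
+#   export OS_USERNAME=admin OS_PASSWORD=secret
+#   export OS_AUTH_URL=http://keystone:5000/v3
+#   export OS_PROJECT_NAME=admin
+#   export OS_USER_DOMAIN_NAME=Default OS_PROJECT_DOMAIN_NAME=Default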
+
+def get_session_auth():
+    loader = loading.get_plugin_loader('password')
+    creds = get_credentials()
+    auth = loader.load_from_options(**creds)
+    return auth
+
+
+def get_session():
+    auth = get_session_auth()
+    try:
+        cacert = os.environ['OS_CACERT']
+    except KeyError:
+        return session.Session(auth=auth)
+    else:
+        insecure = os.getenv('OS_INSECURE', '').lower() == 'true'
+        cacert = False if insecure else cacert
+        return session.Session(auth=auth, verify=cacert)
+
+
+def get_endpoint(service_type, endpoint_type='publicURL'):
+    auth = get_session_auth()
+    # for multi-region, we need to specify region
+    # when finding the endpoint
+    return get_session().get_endpoint(auth=auth,
+                                      service_type=service_type,
+                                      endpoint_type=endpoint_type,
+                                      region_name=os.environ.get(
+                                          "OS_REGION_NAME"))
+
+
+# *********************************************
+#   CLIENTS
+# *********************************************
+def get_heat_api_version():     # pragma: no cover
+    try:
+        api_version = os.environ['HEAT_API_VERSION']
+    except KeyError:
+        return DEFAULT_HEAT_API_VERSION
+    else:
+        log.info("HEAT_API_VERSION is set in env as '%s'", api_version)
+        return api_version
+
+
+def get_cinder_client_version():      # pragma: no cover
+    try:
+        api_version = os.environ['OS_VOLUME_API_VERSION']
+    except KeyError:
+        return DEFAULT_API_VERSION
+    else:
+        log.info("OS_VOLUME_API_VERSION is set in env as '%s'", api_version)
+        return api_version
+
+
+def get_cinder_client():      # pragma: no cover
+    sess = get_session()
+    return cinderclient.Client(get_cinder_client_version(), session=sess)
+
+
+def get_nova_client_version():      # pragma: no cover
+    try:
+        api_version = os.environ['OS_COMPUTE_API_VERSION']
+    except KeyError:
+        return DEFAULT_API_VERSION
+    else:
+        log.info("OS_COMPUTE_API_VERSION is set in env as '%s'", api_version)
+        return api_version
+
+
+def get_nova_client():      # pragma: no cover
+    sess = get_session()
+    return novaclient.Client(get_nova_client_version(), session=sess)
+
+
+def get_neutron_client_version():   # pragma: no cover
+    try:
+        api_version = os.environ['OS_NETWORK_API_VERSION']
+    except KeyError:
+        return DEFAULT_API_VERSION
+    else:
+        log.info("OS_NETWORK_API_VERSION is set in env as '%s'", api_version)
+        return api_version
+
+
+def get_neutron_client():   # pragma: no cover
+    sess = get_session()
+    return neutronclient.Client(get_neutron_client_version(), session=sess)
+
+
+def get_glance_client_version():    # pragma: no cover
+    try:
+        api_version = os.environ['OS_IMAGE_API_VERSION']
+    except KeyError:
+        return DEFAULT_API_VERSION
+    else:
+        log.info("OS_IMAGE_API_VERSION is set in env as '%s'", api_version)
+        return api_version
+
+
+def get_glance_client():    # pragma: no cover
+    sess = get_session()
+    return glanceclient.Client(get_glance_client_version(), session=sess)
+
+
+# *********************************************
+#   NOVA
+# *********************************************
+def get_instances(nova_client):     # pragma: no cover
+    try:
+        return nova_client.servers.list(search_opts={'all_tenants': 1})
+    except Exception:
+        log.exception("Error [get_instances(nova_client)]")
+
+
+def get_instance_status(nova_client, instance):     # pragma: no cover
+    try:
+        return nova_client.servers.get(instance.id).status
+    except Exception:
+        log.exception("Error [get_instance_status(nova_client)]")
+
+
+def get_instance_by_name(nova_client, instance_name):   # pragma: no cover
+    try:
+        return nova_client.servers.find(name=instance_name)
+    except Exception:
+        log.exception("Error [get_instance_by_name(nova_client, '%s')]",
+                      instance_name)
+
+
+def get_aggregates(nova_client):    # pragma: no cover
+    try:
+        return nova_client.aggregates.list()
+    except Exception:
+        log.exception("Error [get_aggregates(nova_client)]")
+
+
+def get_availability_zones(nova_client):    # pragma: no cover
+    try:
+        return nova_client.availability_zones.list()
+    except Exception:
+        log.exception("Error [get_availability_zones(nova_client)]")
+
+
+def get_availability_zone_names(nova_client):   # pragma: no cover
+    try:
+        return [az.zoneName for az in get_availability_zones(nova_client)]
+    except Exception:
+        log.exception("Error [get_availability_zone_names(nova_client)]")
+
+
+def create_aggregate(nova_client, aggregate_name, av_zone):  # pragma: no cover
+    try:
+        nova_client.aggregates.create(aggregate_name, av_zone)
+    except Exception:
+        log.exception("Error [create_aggregate(nova_client, %s, %s)]",
+                      aggregate_name, av_zone)
+        return False
+    else:
+        return True
+
+
+def get_aggregate_id(nova_client, aggregate_name):      # pragma: no cover
+    try:
+        aggregates = get_aggregates(nova_client)
+        _id = next((ag.id for ag in aggregates if ag.name == aggregate_name))
+    except Exception:
+        log.exception("Error [get_aggregate_id(nova_client, %s)]",
+                      aggregate_name)
+    else:
+        return _id
+
+
+def add_host_to_aggregate(nova_client, aggregate_name,
+                          compute_host):    # pragma: no cover
+    try:
+        aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+        nova_client.aggregates.add_host(aggregate_id, compute_host)
+    except Exception:
+        log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]",
+                      aggregate_name, compute_host)
+        return False
+    else:
+        return True
+
+
+def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
+                               compute_host):    # pragma: no cover
+    try:
+        create_aggregate(nova_client, aggregate_name, av_zone)
+        add_host_to_aggregate(nova_client, aggregate_name, compute_host)
+    except Exception:
+        log.exception("Error [create_aggregate_with_host("
+                      "nova_client, %s, %s, %s)]",
+                      aggregate_name, av_zone, compute_host)
+        return False
+    else:
+        return True
+
+
+def create_keypair(nova_client, name, key_path=None):    # pragma: no cover
+    try:
+        with open(key_path) as fpubkey:
+            keypair = get_nova_client().keypairs.create(name=name, public_key=fpubkey.read())
+            return keypair
+    except Exception:
+        log.exception("Error [create_keypair(nova_client)]")
+
+
+def create_instance(json_body):    # pragma: no cover
+    try:
+        return get_nova_client().servers.create(**json_body)
+    except Exception:
+        log.exception("Error create instance failed")
+        return None
+
+
+def create_instance_and_wait_for_active(json_body):    # pragma: no cover
+    SLEEP = 3
+    VM_BOOT_TIMEOUT = 180
+    nova_client = get_nova_client()
+    instance = create_instance(json_body)
+    count = VM_BOOT_TIMEOUT // SLEEP  # integer count for range()
+    for n in range(count, -1, -1):
+        status = get_instance_status(nova_client, instance)
+        if status.lower() == "active":
+            return instance
+        elif status.lower() == "error":
+            log.error("The instance went to ERROR status.")
+            return None
+        time.sleep(SLEEP)
+    log.error("Timeout booting the instance.")
+    return None
+
+
+def attach_server_volume(server_id, volume_id, device=None):    # pragma: no cover
+    try:
+        get_nova_client().volumes.create_server_volume(server_id, volume_id, device)
+    except Exception:
+        log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
+                      server_id, volume_id)
+        return False
+    else:
+        return True
+
+
+def delete_instance(nova_client, instance_id):      # pragma: no cover
+    try:
+        nova_client.servers.force_delete(instance_id)
+    except Exception:
+        log.exception("Error [delete_instance(nova_client, '%s')]",
+                      instance_id)
+        return False
+    else:
+        return True
+
+
+def remove_host_from_aggregate(nova_client, aggregate_name,
+                               compute_host):  # pragma: no cover
+    try:
+        aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+        nova_client.aggregates.remove_host(aggregate_id, compute_host)
+    except Exception:
+        log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)",
+                      aggregate_name, compute_host)
+        return False
+    else:
+        return True
+
+
+def remove_hosts_from_aggregate(nova_client,
+                                aggregate_name):   # pragma: no cover
+    aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+    hosts = nova_client.aggregates.get(aggregate_id).hosts
+    assert(
+        all(remove_host_from_aggregate(nova_client, aggregate_name, host)
+            for host in hosts))
+
+
+def delete_aggregate(nova_client, aggregate_name):  # pragma: no cover
+    try:
+        remove_hosts_from_aggregate(nova_client, aggregate_name)
+        nova_client.aggregates.delete(aggregate_name)
+    except Exception:
+        log.exception("Error [delete_aggregate(nova_client, %s)]",
+                      aggregate_name)
+        return False
+    else:
+        return True
+
+
+def get_server_by_name(name):   # pragma: no cover
+    try:
+        return get_nova_client().servers.list(search_opts={'name': name})[0]
+    except IndexError:
+        log.exception('Failed to get nova client')
+        raise
+
+
+def create_flavor(name, ram, vcpus, disk, **kwargs):   # pragma: no cover
+    try:
+        return get_nova_client().flavors.create(name, ram, vcpus, disk, **kwargs)
+    except Exception:
+        log.exception("Error [create_flavor(nova_client, %s, %s, %s, %s, %s)]",
+                      name, ram, disk, vcpus, kwargs['is_public'])
+        return None
+
+
+def get_image_by_name(name):    # pragma: no cover
+    images = get_nova_client().images.list()
+    try:
+        return next((a for a in images if a.name == name))
+    except StopIteration:
+        log.exception('No image matched')
+
+
+def get_flavor_id(nova_client, flavor_name):    # pragma: no cover
+    flavors = nova_client.flavors.list(detailed=True)
+    flavor_id = ''
+    for f in flavors:
+        if f.name == flavor_name:
+            flavor_id = f.id
+            break
+    return flavor_id
+
+
+def get_flavor_by_name(name):   # pragma: no cover
+    flavors = get_nova_client().flavors.list()
+    try:
+        return next((a for a in flavors if a.name == name))
+    except StopIteration:
+        log.exception('No flavor matched')
+
+
+def check_status(status, name, iterations, interval):   # pragma: no cover
+    for i in range(iterations):
+        try:
+            server = get_server_by_name(name)
+        except IndexError:
+            log.error('Cannot find server %s', name)
+            raise
+
+        if server.status == status:
+            return True
+
+        time.sleep(interval)
+    return False
+
+
+def delete_flavor(flavor_id):    # pragma: no cover
+    try:
+        get_nova_client().flavors.delete(flavor_id)
+    except Exception:
+        log.exception("Error [delete_flavor(nova_client, %s)]", flavor_id)
+        return False
+    else:
+        return True
+
+
+def delete_keypair(nova_client, key):     # pragma: no cover
+    try:
+        nova_client.keypairs.delete(key=key)
+        return True
+    except Exception:
+        log.exception("Error [delete_keypair(nova_client)]")
+        return False
+
+
+# *********************************************
+#   NEUTRON
+# *********************************************
+def get_network_id(neutron_client, network_name):       # pragma: no cover
+    networks = neutron_client.list_networks()['networks']
+    return next((n['id'] for n in networks if n['name'] == network_name), None)
+
+
+def get_port_id_by_ip(neutron_client, ip_address):      # pragma: no cover
+    ports = neutron_client.list_ports()['ports']
+    return next((i['id'] for i in ports for j in i.get(
+        'fixed_ips') if j['ip_address'] == ip_address), None)
+
+
+def create_neutron_net(neutron_client, json_body):      # pragma: no cover
+    try:
+        network = neutron_client.create_network(body=json_body)
+        return network['network']['id']
+    except Exception:
+        log.error("Error [create_neutron_net(neutron_client)]")
+        raise Exception("operation error")
+        return None
+
+
+def delete_neutron_net(neutron_client, network_id):      # pragma: no cover
+    try:
+        neutron_client.delete_network(network_id)
+        return True
+    except Exception:
+        log.error("Error [delete_neutron_net(neutron_client, '%s')]" % network_id)
+        return False
+
+
+def create_neutron_subnet(neutron_client, json_body):      # pragma: no cover
+    try:
+        subnet = neutron_client.create_subnet(body=json_body)
+        return subnet['subnets'][0]['id']
+    except Exception:
+        log.error("Error [create_neutron_subnet")
+        raise Exception("operation error")
+        return None
+
+
+def create_neutron_router(neutron_client, json_body):      # pragma: no cover
+    try:
+        router = neutron_client.create_router(json_body)
+        return router['router']['id']
+    except Exception:
+        log.error("Error [create_neutron_router(neutron_client)]")
+        raise Exception("operation error")
+        return None
+
+
+def delete_neutron_router(neutron_client, router_id):      # pragma: no cover
+    try:
+        neutron_client.delete_router(router=router_id)
+        return True
+    except Exception:
+        log.error("Error [delete_neutron_router(neutron_client, '%s')]" % router_id)
+        return False
+
+
+def remove_gateway_router(neutron_client, router_id):      # pragma: no cover
+    try:
+        neutron_client.remove_gateway_router(router_id)
+        return True
+    except Exception:
+        log.error("Error [remove_gateway_router(neutron_client, '%s')]" % router_id)
+        return False
+
+
+def remove_interface_router(neutron_client, router_id, subnet_id,
+                            **json_body):      # pragma: no cover
+    json_body.update({"subnet_id": subnet_id})
+    try:
+        neutron_client.remove_interface_router(router=router_id,
+                                               body=json_body)
+        return True
+    except Exception:
+        log.error("Error [remove_interface_router(neutron_client, '%s', "
+                  "'%s')]" % (router_id, subnet_id))
+        return False
+
+
+def create_floating_ip(neutron_client, extnet_id):      # pragma: no cover
+    props = {'floating_network_id': extnet_id}
+    try:
+        ip_json = neutron_client.create_floatingip({'floatingip': props})
+        fip_addr = ip_json['floatingip']['floating_ip_address']
+        fip_id = ip_json['floatingip']['id']
+    except Exception:
+        log.error("Error [create_floating_ip(neutron_client)]")
+        return None
+    return {'fip_addr': fip_addr, 'fip_id': fip_id}
+
+
+def delete_floating_ip(nova_client, floatingip_id):      # pragma: no cover
+    try:
+        nova_client.floating_ips.delete(floatingip_id)
+        return True
+    except Exception:
+        log.error("Error [delete_floating_ip(nova_client, '%s')]" % floatingip_id)
+        return False
+
+
+def get_security_groups(neutron_client):      # pragma: no cover
+    try:
+        security_groups = neutron_client.list_security_groups()[
+            'security_groups']
+        return security_groups
+    except Exception:
+        log.error("Error [get_security_groups(neutron_client)]")
+        return None
+
+
+def get_security_group_id(neutron_client, sg_name):      # pragma: no cover
+    security_groups = get_security_groups(neutron_client)
+    sg_id = ''
+    for sg in security_groups:
+        if sg['name'] == sg_name:
+            sg_id = sg['id']
+            break
+    return sg_id
+
+
+def create_security_group(neutron_client, sg_name, sg_description):      # pragma: no cover
+    json_body = {'security_group': {'name': sg_name,
+                                    'description': sg_description}}
+    try:
+        secgroup = neutron_client.create_security_group(json_body)
+        return secgroup['security_group']
+    except Exception:
+        log.error("Error [create_security_group(neutron_client, '%s', "
+                  "'%s')]" % (sg_name, sg_description))
+        return None
+
+
+def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
+                         port_range_min=None, port_range_max=None,
+                         **json_body):      # pragma: no cover
+    # We create a security group rule in two steps
+    # 1 - we check the format and set the json body accordingly
+    # 2 - we call the neutron client to create the security group rule
+
+    # Format check
+    json_body.update({'security_group_rule': {'direction': direction,
+                     'security_group_id': sg_id, 'protocol': protocol}})
+    # port range parameters may be
+    # - both None => we add nothing to the json description
+    # - both not None => we add them to the json description
+    # but one cannot be None if the other is not None
+    if (port_range_min is not None and port_range_max is not None):
+        # add port_range in json description
+        json_body['security_group_rule']['port_range_min'] = port_range_min
+        json_body['security_group_rule']['port_range_max'] = port_range_max
+        log.debug("Security_group format set (port range included)")
+    else:
+        # either both port range are set to None => do nothing
+        # or one is set but not the other => log it and return False
+        if port_range_min is None and port_range_max is None:
+            log.debug("Security_group format set (no port range mentioned)")
+        else:
+            log.error("Bad security group format."
+                      "One of the port range is not properly set:"
+                      "range min: {},"
+                      "range max: {}".format(port_range_min,
+                                             port_range_max))
+            return False
+
+    # Create security group using neutron client
+    try:
+        neutron_client.create_security_group_rule(json_body)
+        return True
+    except Exception:
+        log.exception("Impossible to create_security_group_rule,"
+                      "security group rule probably already exists")
+        return False
+
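+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch, assuming `neutron_client` is an authenticated
+# python-neutronclient instance; it combines the helpers above to open SSH
+# ingress on an existing, named security group.
+def _example_open_ssh_ingress(neutron_client):      # pragma: no cover
+    sg_id = get_security_group_id(neutron_client, "default")
+    if not sg_id:
+        log.error("Security group 'default' not found")
+        return False
+    return create_secgroup_rule(neutron_client, sg_id, 'ingress', 'tcp',
+                                port_range_min='22', port_range_max='22')
+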
+
+def create_security_group_full(neutron_client,
+                               sg_name, sg_description):      # pragma: no cover
+    sg_id = get_security_group_id(neutron_client, sg_name)
+    if sg_id != '':
+        log.info("Using existing security group '%s'..." % sg_name)
+    else:
+        log.info("Creating security group  '%s'..." % sg_name)
+        SECGROUP = create_security_group(neutron_client,
+                                         sg_name,
+                                         sg_description)
+        if not SECGROUP:
+            log.error("Failed to create the security group...")
+            return None
+
+        sg_id = SECGROUP['id']
+
+        log.debug("Security group '%s' with ID=%s created successfully."
+                  % (SECGROUP['name'], sg_id))
+
+        log.debug("Adding ICMP rules in security group '%s'..."
+                  % sg_name)
+        if not create_secgroup_rule(neutron_client, sg_id,
+                                    'ingress', 'icmp'):
+            log.error("Failed to create the security group rule...")
+            return None
+
+        log.debug("Adding SSH rules in security group '%s'..."
+                  % sg_name)
+        if not create_secgroup_rule(
+                neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
+            log.error("Failed to create the security group rule...")
+            return None
+
+        if not create_secgroup_rule(
+                neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
+            log.error("Failed to create the security group rule...")
+            return None
+    return sg_id
+
+
+# *********************************************
+#   GLANCE
+# *********************************************
+def get_image_id(glance_client, image_name):    # pragma: no cover
+    images = glance_client.images.list()
+    return next((i.id for i in images if i.name == image_name), None)
+
+
+def create_image(glance_client, image_name, file_path, disk_format,
+                 container_format, min_disk, min_ram, protected, tag,
+                 public, **kwargs):    # pragma: no cover
+    if not os.path.isfile(file_path):
+        log.error("Error: file %s does not exist." % file_path)
+        return None
+    try:
+        image_id = get_image_id(glance_client, image_name)
+        if image_id is not None:
+            log.info("Image %s already exists." % image_name)
+        else:
+            log.info("Creating image '%s' from '%s'...", image_name, file_path)
+
+            image = glance_client.images.create(name=image_name,
+                                                visibility=public,
+                                                disk_format=disk_format,
+                                                container_format=container_format,
+                                                min_disk=min_disk,
+                                                min_ram=min_ram,
+                                                tags=tag,
+                                                protected=protected,
+                                                **kwargs)
+            image_id = image.id
+            with open(file_path) as image_data:
+                glance_client.images.upload(image_id, image_data)
+        return image_id
+    except Exception:
+        log.error("Error [create_glance_image(glance_client, '%s', '%s', '%s')]",
+                  image_name, file_path, public)
+        return None
+
+
+def delete_image(glance_client, image_id):    # pragma: no cover
+    try:
+        glance_client.images.delete(image_id)
+
+    except Exception:
+        log.exception("Error [delete_flavor(glance_client, %s)]", image_id)
+        return False
+    else:
+        return True
+
+
+# *********************************************
+#   CINDER
+# *********************************************
+def get_volume_id(volume_name):    # pragma: no cover
+    volumes = get_cinder_client().volumes.list()
+    return next((v.id for v in volumes if v.name == volume_name), None)
+
+
+def create_volume(cinder_client, volume_name, volume_size,
+                  volume_image=False):    # pragma: no cover
+    try:
+        if volume_image:
+            volume = cinder_client.volumes.create(name=volume_name,
+                                                  size=volume_size,
+                                                  imageRef=volume_image)
+        else:
+            volume = cinder_client.volumes.create(name=volume_name,
+                                                  size=volume_size)
+        return volume
+    except Exception:
+        log.exception("Error [create_volume(cinder_client, %s)]",
+                      (volume_name, volume_size))
+        return None
+
+
+def delete_volume(cinder_client, volume_id, forced=False):      # pragma: no cover
+    try:
+        if forced:
+            try:
+                cinder_client.volumes.detach(volume_id)
+            except Exception:
+                log.error(sys.exc_info()[0])
+            cinder_client.volumes.force_delete(volume_id)
+        else:
+            while True:
+                volume = get_cinder_client().volumes.get(volume_id)
+                if volume.status.lower() == 'available':
+                    break
+            cinder_client.volumes.delete(volume_id)
+        return True
+    except Exception:
+        log.exception("Error [delete_volume(cinder_client, '%s')]" % volume_id)
+        return False
+
+
+def detach_volume(server_id, volume_id):      # pragma: no cover
+    try:
+        get_nova_client().volumes.delete_server_volume(server_id, volume_id)
+        return True
+    except Exception:
+        log.exception("Error [detach_server_volume(nova_client, '%s', '%s')]",
+                      server_id, volume_id)
+        return False
diff --git a/vnftest/common/process.py b/vnftest/common/process.py
new file mode 100644
index 0000000..21a21ac
--- /dev/null
+++ b/vnftest/common/process.py
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/process.py
+
+import logging
+import multiprocessing
+import signal
+import subprocess
+import time
+
+import os
+from oslo_utils import encodeutils
+
+from vnftest.common import exceptions
+from vnftest.common import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+def check_if_process_failed(proc, timeout=1):
+    if proc is not None:
+        proc.join(timeout)
+        # Only abort if the child process exited with an error
+        if proc.exitcode is not None and proc.exitcode > 0:
+            raise RuntimeError("{} exited with status {}".format(proc.name, proc.exitcode))
+
+
+def terminate_children(timeout=3):
+    current_process = multiprocessing.current_process()
+    active_children = multiprocessing.active_children()
+    if not active_children:
+        LOG.debug("no children to terminate")
+        return
+    for child in active_children:
+        LOG.debug("%s %s %s, child: %s %s", current_process.name, current_process.pid,
+                  os.getpid(), child, child.pid)
+        LOG.debug("joining %s", child)
+        child.join(timeout)
+        child.terminate()
+    active_children = multiprocessing.active_children()
+    if not active_children:
+        LOG.debug("no children to terminate")
+    for child in active_children:
+        LOG.debug("%s %s %s, after terminate child: %s %s", current_process.name,
+                  current_process.pid, os.getpid(), child, child.pid)
+
+
+def _additional_env_args(additional_env):
+    """Build arguments for adding additional environment vars with env"""
+    if additional_env is None:
+        return []
+    return ['env'] + ['%s=%s' % pair for pair in additional_env.items()]
+
+
+def _subprocess_setup():
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
+                     env=None, preexec_fn=_subprocess_setup, close_fds=True):
+    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
+                            stderr=stderr, preexec_fn=preexec_fn,
+                            close_fds=close_fds, env=env)
+
+
+def create_process(cmd, run_as_root=False, additional_env=None):
+    """Create a process object for the given command.
+
+    The return value will be a tuple of the process object and the
+    list of command arguments used to create it.
+    """
+    if not isinstance(cmd, list):
+        cmd = [cmd]
+    cmd = list(map(str, _additional_env_args(additional_env) + cmd))
+    if run_as_root:
+        # NOTE(ralonsoh): to handle a command executed as root, using
+        # a root wrapper, instead of using "sudo".
+        pass
+    LOG.debug("Running command: %s", cmd)
+    obj = subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE,
+                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    return obj, cmd
+
+
+def execute(cmd, process_input=None, additional_env=None,
+            check_exit_code=True, return_stderr=False, log_fail_as_error=True,
+            extra_ok_codes=None, run_as_root=False):
+    try:
+        if process_input is not None:
+            _process_input = encodeutils.to_utf8(process_input)
+        else:
+            _process_input = None
+
+        # NOTE(ralonsoh): to handle the execution of a command as root,
+        # using a root wrapper, instead of using "sudo".
+        obj, cmd = create_process(cmd, run_as_root=run_as_root,
+                                  additional_env=additional_env)
+        _stdout, _stderr = obj.communicate(_process_input)
+        returncode = obj.returncode
+        obj.stdin.close()
+        _stdout = utils.safe_decode_utf8(_stdout)
+        _stderr = utils.safe_decode_utf8(_stderr)
+
+        extra_ok_codes = extra_ok_codes or []
+        if returncode and returncode not in extra_ok_codes:
+            msg = ("Exit code: %(returncode)d; "
+                   "Stdin: %(stdin)s; "
+                   "Stdout: %(stdout)s; "
+                   "Stderr: %(stderr)s") % {'returncode': returncode,
+                                            'stdin': process_input or '',
+                                            'stdout': _stdout,
+                                            'stderr': _stderr}
+            if log_fail_as_error:
+                LOG.error(msg)
+            if check_exit_code:
+                raise exceptions.ProcessExecutionError(msg,
+                                                       returncode=returncode)
+
+    finally:
+        # This appears to be necessary in order for the subprocess to clean up
+        # something between calls; without it, the second process hangs when
+        # two execute calls are made in a row.
+        time.sleep(0)
+
+    return (_stdout, _stderr) if return_stderr else _stdout
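+
+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch of calling execute() with a list argv; a non-zero exit code
+# raises exceptions.ProcessExecutionError unless check_exit_code is False or
+# the code is whitelisted via extra_ok_codes.
+def _example_list_directory():      # pragma: no cover
+    try:
+        return execute(['ls', '-l', '/tmp'])
+    except exceptions.ProcessExecutionError:
+        LOG.exception("listing /tmp failed")
+        return None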
diff --git a/vnftest/common/rest_client.py b/vnftest/common/rest_client.py
new file mode 100644
index 0000000..23a108c
--- /dev/null
+++ b/vnftest/common/rest_client.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+import json
+import urllib2
+import requests
+
+
+def post(url, headers, data, logger):
+    return call(url, 'POST', headers, data, logger)
+
+
+def call(url, method, headers, data, logger):
+    data_json = json.dumps(data)
+    f = None
+    try:
+        req = urllib2.Request(url, data=data_json, headers=headers)
+        req.get_method = lambda: method
+        f = urllib2.urlopen(req)
+        return_code = f.code
+        response_body = f.read()
+        f.close()
+        if len(str(response_body)) == 0:
+            response_body = "{}"
+        response_body = json.loads(response_body)
+        result = {'return_code': return_code, 'body': response_body}
+        return result
+
+    except Exception as e:
+        message = "Cannot read content from {}, exception: {}".format(url, e)
+        logger.exception(message)
+        raise RuntimeError(message)
+    finally:
+        if f is not None:
+            f.close()
+
+
+def upload_file(url, headers, file, logger):
+    logger.debug("Upload file. URL: {}".format(url))
+    response = None
+    try:
+        response = requests.post(url, headers=headers, files=file)
+        return {'return_code': response.status_code, 'body': response.json()}
+    except Exception as e:
+        message = "Error while uploading file to {}, exception: {}".format(url, e)
+        logger.exception(message)
+        raise RuntimeError(message)
+    finally:
+        if response is not None:
+            response.close()
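+
+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch of posting a JSON payload; the URL and payload below are
+# hypothetical and `logger` is the caller's own logging.Logger instance.
+def _example_post(logger):      # pragma: no cover
+    headers = {'Content-Type': 'application/json'}
+    payload = {'name': 'demo'}
+    result = post('http://localhost:8080/api/items', headers, payload, logger)
+    return result['return_code'], result['body']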
diff --git a/vnftest/common/task_template.py b/vnftest/common/task_template.py
new file mode 100755
index 0000000..7872aed
--- /dev/null
+++ b/vnftest/common/task_template.py
@@ -0,0 +1,78 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/task_template.py
+
+from __future__ import absolute_import
+import re
+import jinja2
+import jinja2.meta
+import yaml
+
+
+def finalize_for_yaml(elem):
+    """Render Jinja2 output specifically for YAML files"""
+    # Jinja2 by default converts None to 'None'; we can't allow this
+    # we could convert to empty string '', or we can convert to null, aka ~
+    if elem is None:
+        return '~'
+    # convert data structures to inline YAML
+    # match builtin types because we shouldn't be trying to render complex types
+    if isinstance(elem, (dict, list)):
+        # remove newlines because we are injecting back into YAML
+        # use flow style to keep the dump on a single line
+        return yaml.safe_dump(elem, default_flow_style=True).replace('\n', '')
+    return elem
+
+
+class TaskTemplate(object):
+
+    @classmethod
+    def render(cls, task_template, **kwargs):
+        """Render jinja2 task template to Vnftest input task.
+
+        :param task_template: string that contains template
+        :param kwargs: Dict with template arguments
+        :returns: rendered template str
+        """
+
+        from six.moves import builtins
+
+        ast = jinja2.Environment().parse(task_template)
+        required_kwargs = jinja2.meta.find_undeclared_variables(ast)
+
+        missing = set(required_kwargs) - set(kwargs) - set(dir(builtins))
+        real_missing = [mis for mis in missing
+                        if is_really_missing(mis, task_template)]
+
+        if real_missing:
+            multi_msg = ("Please specify next template task arguments:%s")
+            single_msg = ("Please specify template task argument:%s")
+            raise TypeError((len(real_missing) > 1 and multi_msg or single_msg)
+                            % ", ".join(real_missing))
+        return jinja2.Template(task_template, finalize=finalize_for_yaml).render(**kwargs)
+
+
+def is_really_missing(mis, task_template):
+    # Remove variables that have default values from the missing set.
+    # A construction that won't be properly checked is {% set x = x or 1 %}
+    if re.search(mis.join([r"{%\s*set\s+", r"\s*=\s*", r"[^\w]+"]),
+                 task_template):
+        return False
+    # Also check for a default filter which can show up as
+    # a missing variable
+    if re.search(mis + r"\s*\|\s*default\(", task_template):
+        return False
+    return True
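+
+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: an undeclared variable normally raises TypeError, but a
+# `default` filter (or a `{% set x = x or ... %}` construction) is treated as
+# having a value, so this renders without passing any arguments.
+def _example_render():      # pragma: no cover
+    template = "key: {{ value | default('fallback') }}"
+    return TaskTemplate.render(template)    # -> "key: fallback"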
diff --git a/vnftest/common/template_format.py b/vnftest/common/template_format.py
new file mode 100644
index 0000000..5cfc2f2
--- /dev/null
+++ b/vnftest/common/template_format.py
@@ -0,0 +1,72 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/template_format.py
+
+from __future__ import absolute_import
+
+import yaml
+from oslo_serialization import jsonutils
+
+if hasattr(yaml, 'CSafeLoader'):
+    # make a dynamic subclass so we don't override global yaml Loader
+    yaml_loader = type('HeatYamlLoader', (yaml.CSafeLoader,), {})
+else:
+    yaml_loader = type('HeatYamlLoader', (yaml.SafeLoader,), {})
+
+if hasattr(yaml, 'CSafeDumper'):
+    yaml_dumper = yaml.CSafeDumper
+else:
+    yaml_dumper = yaml.SafeDumper
+
+
+# This breaks NetworkServiceTestCase yaml loading, because we need to convert to
+# native Python str() objects: we use Trex, and Trex has broken unicode handling
+def _construct_yaml_str(self, node):
+    # Override the default string handling function
+    # to always return unicode objects
+    return self.construct_scalar(node)
+
+yaml_loader.add_constructor(u'tag:yaml.org,2002:str', _construct_yaml_str)
+# Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type
+# datetime.data which causes problems in API layer when being processed by
+# openstack.common.jsonutils. Therefore, make unicode string out of timestamps
+# until jsonutils can handle dates.
+yaml_loader.add_constructor(u'tag:yaml.org,2002:timestamp',
+                            _construct_yaml_str)
+
+
+def parse(tmpl_str):
+    """Takes a string and returns a dict containing the parsed structure.
+
+    This includes determination of whether the string is using the
+    JSON or YAML format.
+    """
+    if tmpl_str.startswith('{'):
+        tpl = jsonutils.loads(tmpl_str)
+    else:
+        try:
+            # we already use SafeLoader when constructing special Heat YAML loader class
+            tpl = yaml.load(tmpl_str, Loader=yaml_loader)
+        except yaml.YAMLError as yea:
+            raise ValueError(yea)
+        else:
+            if tpl is None:
+                tpl = {}
+    # Looking for supported version keys in the loaded template
+    if not ('HeatTemplateFormatVersion' in tpl or
+            'heat_template_version' in tpl or
+            'AWSTemplateFormatVersion' in tpl):
+        raise ValueError("Template format version not found.")
+    return tpl
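+
+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: parse() accepts JSON or YAML text as long as one of the
+# supported template version keys is present.
+def _example_parse():      # pragma: no cover
+    tmpl = "heat_template_version: 2013-05-23\nresources: {}\n"
+    return parse(tmpl)    # -> {'heat_template_version': '2013-05-23', 'resources': {}}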
diff --git a/vnftest/common/utils.py b/vnftest/common/utils.py
new file mode 100644
index 0000000..e62b5db
--- /dev/null
+++ b/vnftest/common/utils.py
@@ -0,0 +1,399 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/utils.py
+
+import collections
+from contextlib import closing
+import datetime
+import errno
+import importlib
+import ipaddress
+import logging
+import os
+import random
+import socket
+import subprocess
+import sys
+
+import six
+from flask import jsonify
+from six.moves import configparser
+from oslo_serialization import jsonutils
+
+import vnftest
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+# Decorator for cli-args
+def cliargs(*args, **kwargs):
+    def _decorator(func):
+        func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs))
+        return func
+    return _decorator
+
+
+def itersubclasses(cls, _seen=None):
+    """Generator over all subclasses of a given class in depth first order."""
+
+    if not isinstance(cls, type):
+        raise TypeError("itersubclasses must be called with "
+                        "new-style classes, not %.100r" % cls)
+    _seen = _seen or set()
+    try:
+        subs = cls.__subclasses__()
+    except TypeError:   # fails only when cls is type
+        subs = cls.__subclasses__(cls)
+    for sub in subs:
+        if sub not in _seen:
+            _seen.add(sub)
+            yield sub
+            for sub in itersubclasses(sub, _seen):
+                yield sub
+
+
+def import_modules_from_package(package):
+    """Import modules given a package name
+
+    :param: package - Full package name. For example: rally.deploy.engines
+    """
+    vnftest_root = os.path.dirname(os.path.dirname(vnftest.__file__))
+    path = os.path.join(vnftest_root, *package.split('.'))
+    for root, _, files in os.walk(path):
+        matches = (filename for filename in files if filename.endswith('.py')
+                   and not filename.startswith('__'))
+        new_package = os.path.relpath(root, vnftest_root).replace(os.sep,
+                                                                    '.')
+        module_names = set(
+            '{}.{}'.format(new_package, filename.rsplit('.py', 1)[0])
+            for filename in matches)
+        # Find modules which haven't already been imported
+        missing_modules = module_names.difference(sys.modules)
+        logger.debug('Importing modules: %s', missing_modules)
+        for module_name in missing_modules:
+            try:
+                importlib.import_module(module_name)
+            except (ImportError, SyntaxError):
+                logger.exception('Unable to import module %s', module_name)
+
+
+def makedirs(d):
+    try:
+        os.makedirs(d)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
+
+
+def remove_file(path):
+    try:
+        os.remove(path)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+
+
+def execute_command(cmd):
+    exec_msg = "Executing command: '%s'" % cmd
+    logger.debug(exec_msg)
+
+    output = subprocess.check_output(cmd.split()).split(os.linesep)
+
+    return output
+
+
+def source_env(env_file):
+    p = subprocess.Popen(". %s; env" % env_file, stdout=subprocess.PIPE,
+                         shell=True)
+    output = p.communicate()[0]
+    env = dict(line.split('=', 1) for line in output.splitlines() if '=' in line)
+    os.environ.update(env)
+    return env
+
+
+def read_json_from_file(path):
+    with open(path, 'r') as f:
+        j = f.read()
+    # don't use jsonutils.load() it conflicts with already decoded input
+    return jsonutils.loads(j)
+
+
+def write_json_to_file(path, data, mode='w'):
+    with open(path, mode) as f:
+        jsonutils.dump(data, f)
+
+
+def write_file(path, data, mode='w'):
+    with open(path, mode) as f:
+        f.write(data)
+
+
+def parse_ini_file(path):
+    parser = configparser.ConfigParser()
+
+    try:
+        files = parser.read(path)
+    except configparser.MissingSectionHeaderError:
+        logger.exception('invalid file type')
+        raise
+    else:
+        if not files:
+            raise RuntimeError('file does not exist: %s' % path)
+
+    try:
+        default = {k: v for k, v in parser.items('DEFAULT')}
+    except configparser.NoSectionError:
+        default = {}
+
+    config = dict(DEFAULT=default,
+                  **{s: {k: v for k, v in parser.items(
+                      s)} for s in parser.sections()})
+
+    return config
+
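+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch of the returned shape, {'DEFAULT': {...}, '<section>': {...}};
+# the config path below is hypothetical.
+def _example_parse_ini():      # pragma: no cover
+    config = parse_ini_file('/etc/vnftest/vnftest.conf')
+    return config['DEFAULT'].get('dispatcher')
+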
+
+def get_port_mac(sshclient, port):
+    cmd = "ifconfig |grep HWaddr |grep %s |awk '{print $5}' " % port
+    status, stdout, stderr = sshclient.execute(cmd)
+
+    if status:
+        raise RuntimeError(stderr)
+    return stdout.rstrip()
+
+
+def get_port_ip(sshclient, port):
+    cmd = "ifconfig %s |grep 'inet addr' |awk '{print $2}' " \
+        "|cut -d ':' -f2 " % port
+    status, stdout, stderr = sshclient.execute(cmd)
+
+    if status:
+        raise RuntimeError(stderr)
+    return stdout.rstrip()
+
+
+def flatten_dict_key(data):
+    next_data = {}
+
+    # use list, because iterable is too generic
+    if not any(isinstance(v, (collections.Mapping, list))
+               for v in data.values()):
+        return data
+
+    for k, v in data.items():
+        if isinstance(v, collections.Mapping):
+            for n_k, n_v in v.items():
+                next_data["%s.%s" % (k, n_k)] = n_v
+        # use list because iterable is too generic
+        elif isinstance(v, collections.Iterable) and not isinstance(v, six.string_types):
+            for index, item in enumerate(v):
+                next_data["%s%d" % (k, index)] = item
+        else:
+            next_data[k] = v
+
+    return flatten_dict_key(next_data)
+
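+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: nested mappings become dotted keys and sequences become
+# indexed keys, applied recursively until the dict is flat.
+def _example_flatten_dict_key():      # pragma: no cover
+    data = {'a': {'b': 1}, 'c': [2, 3]}
+    return flatten_dict_key(data)    # -> {'a.b': 1, 'c0': 2, 'c1': 3}
+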
+
+def translate_to_str(obj):
+    if isinstance(obj, collections.Mapping):
+        return {str(k): translate_to_str(v) for k, v in obj.items()}
+    elif isinstance(obj, list):
+        return [translate_to_str(ele) for ele in obj]
+    elif isinstance(obj, six.text_type):
+        return str(obj)
+    return obj
+
+
+def result_handler(status, data):
+    result = {
+        'status': status,
+        'result': data
+    }
+    return jsonify(result)
+
+
+def change_obj_to_dict(obj):
+    dic = {}
+    for k, v in vars(obj).items():
+        try:
+            vars(v)
+        except TypeError:
+            dic.update({k: v})
+    return dic
+
+
+def set_dict_value(dic, keys, value):
+    return_dic = dic
+
+    for key in keys.split('.'):
+        return_dic.setdefault(key, {})
+        if key == keys.split('.')[-1]:
+            return_dic[key] = value
+        else:
+            return_dic = return_dic[key]
+    return dic
+
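+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: dotted keys create intermediate dicts as needed.
+def _example_set_dict_value():      # pragma: no cover
+    return set_dict_value({}, 'a.b.c', 1)    # -> {'a': {'b': {'c': 1}}}
+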
+
+def get_free_port(ip):
+    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+        port = random.randint(5000, 10000)
+        while s.connect_ex((ip, port)) == 0:
+            port = random.randint(5000, 10000)
+        return port
+
+
+def mac_address_to_hex_list(mac):
+    octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
+    assert len(octets) == 6 and all(len(octet) == 4 for octet in octets)
+    return octets
+
+
+def safe_ip_address(ip_addr):
+    """ get ip address version v6 or v4 """
+    try:
+        return ipaddress.ip_address(six.text_type(ip_addr))
+    except ValueError:
+        logging.error("%s is not valid", ip_addr)
+        return None
+
+
+def get_ip_version(ip_addr):
+    """ get ip address version v6 or v4 """
+    try:
+        address = ipaddress.ip_address(six.text_type(ip_addr))
+    except ValueError:
+        logging.error("%s is not valid", ip_addr)
+        return None
+    else:
+        return address.version
+
+
+def ip_to_hex(ip_addr, separator=''):
+    try:
+        address = ipaddress.ip_address(six.text_type(ip_addr))
+    except ValueError:
+        logging.error("%s is not valid", ip_addr)
+        return ip_addr
+
+    if address.version != 4:
+        return ip_addr
+
+    if not separator:
+        return '{:08x}'.format(int(address))
+
+    return separator.join('{:02x}'.format(octet) for octet in address.packed)
+
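+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: an IPv4 address becomes eight hex digits; anything that is
+# not valid IPv4 is returned unchanged.
+def _example_ip_to_hex():      # pragma: no cover
+    return ip_to_hex('10.0.0.1')    # -> '0a000001'
+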
+
+def try_int(s, *args):
+    """Convert to integer if possible."""
+    try:
+        return int(s)
+    except (TypeError, ValueError):
+        return args[0] if args else s
+
+
+class SocketTopology(dict):
+
+    @classmethod
+    def parse_cpuinfo(cls, cpuinfo):
+        socket_map = {}
+
+        lines = cpuinfo.splitlines()
+
+        core_details = []
+        core_lines = {}
+        for line in lines:
+            if line.strip():
+                name, value = line.split(":", 1)
+                core_lines[name.strip()] = try_int(value.strip())
+            else:
+                core_details.append(core_lines)
+                core_lines = {}
+
+        for core in core_details:
+            socket_map.setdefault(core["physical id"], {}).setdefault(
+                core["core id"], {})[core["processor"]] = (
+                core["processor"], core["core id"], core["physical id"])
+
+        return cls(socket_map)
+
+    def sockets(self):
+        return sorted(self.keys())
+
+    def cores(self):
+        return sorted(core for cores in self.values() for core in cores)
+
+    def processors(self):
+        return sorted(
+            proc for cores in self.values() for procs in cores.values() for
+            proc in procs)
+
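+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: parse_cpuinfo() expects /proc/cpuinfo-style blocks
+# separated by blank lines (note the trailing blank line in the sample).
+def _example_socket_topology():      # pragma: no cover
+    cpuinfo = ("processor   : 0\n"
+               "physical id : 0\n"
+               "core id     : 0\n"
+               "\n")
+    topo = SocketTopology.parse_cpuinfo(cpuinfo)
+    return topo.sockets(), topo.cores(), topo.processors()    # -> ([0], [0], [0])
+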
+
+def config_to_dict(config):
+    return {section: dict(config.items(section)) for section in
+            config.sections()}
+
+
+def validate_non_string_sequence(value, default=None, raise_exc=None):
+    # NOTE(ralonsoh): refactor this function to check if raise_exc is an
+    # Exception. Remove duplicate code, this function is duplicated in this
+    # repository.
+    if isinstance(value, collections.Sequence) and not isinstance(value, six.string_types):
+        return value
+    if raise_exc:
+        raise raise_exc  # pylint: disable=raising-bad-type
+    return default
+
+
+def join_non_strings(separator, *non_strings):
+    try:
+        non_strings = validate_non_string_sequence(non_strings[0], raise_exc=RuntimeError)
+    except (IndexError, RuntimeError):
+        pass
+    return str(separator).join(str(non_string) for non_string in non_strings)
+
+
+def safe_decode_utf8(s):
+    """Safe decode a str from UTF"""
+    if six.PY3 and isinstance(s, bytes):
+        return s.decode('utf-8', 'surrogateescape')
+    return s
+
+
+class ErrorClass(object):
+
+    def __init__(self, *args, **kwargs):
+        if 'test' not in kwargs:
+            raise RuntimeError
+
+    def __getattr__(self, item):
+        raise AttributeError
+
+
+class Timer(object):
+    def __init__(self):
+        super(Timer, self).__init__()
+        self.start = self.delta = None
+
+    def __enter__(self):
+        self.start = datetime.datetime.now()
+        return self
+
+    def __exit__(self, *_):
+        self.delta = datetime.datetime.now() - self.start
+
+    def __getattr__(self, item):
+        return getattr(self.delta, item)
+
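+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: Timer is a context manager; after the block exits, the
+# attributes of the measured timedelta (e.g. total_seconds()) are proxied.
+def _example_timer():      # pragma: no cover
+    with Timer() as t:
+        pass
+    return t.total_seconds()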
diff --git a/vnftest/common/yaml_loader.py b/vnftest/common/yaml_loader.py
new file mode 100644
index 0000000..4f93e62
--- /dev/null
+++ b/vnftest/common/yaml_loader.py
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/yaml_loader.py
+
+from __future__ import absolute_import
+
+import yaml
+
+
+if hasattr(yaml, 'CSafeLoader'):
+    # make a dynamic subclass so we don't override global yaml Loader
+    yaml_loader = type('CustomLoader', (yaml.CSafeLoader,), {})
+else:
+    yaml_loader = type('CustomLoader', (yaml.SafeLoader,), {})
+
+if hasattr(yaml, 'CSafeDumper'):
+    yaml_dumper = yaml.CSafeDumper
+else:
+    yaml_dumper = yaml.SafeDumper
+
+
+def yaml_load(tmpl_str):
+    return yaml.load(tmpl_str, Loader=yaml_loader)
diff --git a/vnftest/dispatcher/__init__.py b/vnftest/dispatcher/__init__.py
new file mode 100644
index 0000000..232233f
--- /dev/null
+++ b/vnftest/dispatcher/__init__.py
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/dispatcher/__init__.py
+
+from __future__ import absolute_import
+from oslo_config import cfg
+
+import vnftest.common.utils as utils
+
+utils.import_modules_from_package("vnftest.dispatcher")
+
+CONF = cfg.CONF
+OPTS = [
+    cfg.StrOpt('dispatcher',
+               default='file',
+               help='Dispatcher to store data.'),
+]
+CONF.register_opts(OPTS)
diff --git a/vnftest/dispatcher/base.py b/vnftest/dispatcher/base.py
new file mode 100644
index 0000000..133b792
--- /dev/null
+++ b/vnftest/dispatcher/base.py
@@ -0,0 +1,50 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/dispatcher/base.py
+
+from __future__ import absolute_import
+import abc
+import six
+
+import vnftest.common.utils as utils
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Base(object):
+
+    def __init__(self, conf):
+        self.conf = conf
+
+    @staticmethod
+    def get_cls(dispatcher_type):
+        """Return class of specified type."""
+        for dispatcher in utils.itersubclasses(Base):
+            if dispatcher_type == dispatcher.__dispatcher_type__:
+                return dispatcher
+        raise RuntimeError("No such dispatcher_type %s" % dispatcher_type)
+
+    @staticmethod
+    def get(config):
+        """Returns instance of a dispatcher for dispatcher type.
+        """
+        list_dispatcher = \
+            [Base.get_cls(out_type.capitalize())(config)
+             for out_type in config['DEFAULT']['dispatcher']]
+
+        return list_dispatcher
+
+    @abc.abstractmethod
+    def flush_result_data(self, data):
+        """Flush result data into permanent storage media interface."""
diff --git a/vnftest/dispatcher/file.py b/vnftest/dispatcher/file.py
new file mode 100644
index 0000000..83d0fee
--- /dev/null
+++ b/vnftest/dispatcher/file.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/dispatcher/file.py
+
+from __future__ import absolute_import
+
+from vnftest.dispatcher.base import Base as DispatchBase
+from vnftest.common import constants as consts
+from vnftest.common import utils
+
+
+class FileDispatcher(DispatchBase):
+    """Dispatcher class for recording data to a file.
+    """
+
+    __dispatcher_type__ = "File"
+
+    def __init__(self, conf):
+        super(FileDispatcher, self).__init__(conf)
+        self.target = conf['dispatcher_file'].get('file_path',
+                                                  consts.DEFAULT_OUTPUT_FILE)
+
+    def flush_result_data(self, data):
+        utils.write_json_to_file(self.target, data)
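+
+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: dispatchers are looked up by __dispatcher_type__ through
+# the Base registry; the conf dict shape follows FileDispatcher.__init__ above
+# and the output path is hypothetical.
+def _example_file_dispatcher():      # pragma: no cover
+    conf = {'dispatcher_file': {'file_path': '/tmp/vnftest_example.out'}}
+    dispatcher = DispatchBase.get_cls("File")(conf)
+    dispatcher.flush_result_data({'status': 1, 'result': {}})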
diff --git a/vnftest/dispatcher/http.py b/vnftest/dispatcher/http.py
new file mode 100644
index 0000000..da66c90
--- /dev/null
+++ b/vnftest/dispatcher/http.py
@@ -0,0 +1,94 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/dispatcher/http.py
+
+from __future__ import absolute_import
+
+import logging
+import os
+from datetime import datetime
+
+from oslo_serialization import jsonutils
+import requests
+
+from vnftest.dispatcher.base import Base as DispatchBase
+
+LOG = logging.getLogger(__name__)
+
+
+class HttpDispatcher(DispatchBase):
+    """Dispatcher class for posting data into a http target.
+    """
+
+    __dispatcher_type__ = "Http"
+
+    def __init__(self, conf):
+        super(HttpDispatcher, self).__init__(conf)
+        http_conf = conf['dispatcher_http']
+        self.headers = {'Content-type': 'application/json'}
+        self.timeout = int(http_conf.get('timeout', 5))
+        self.target = http_conf.get('target', 'http://127.0.0.1:8000/results')
+
+    def flush_result_data(self, data):
+        if self.target == '':
+            # if the target was not set, do not do anything
+            LOG.error('Dispatcher target was not set, no data will '
+                      'be posted.')
+            return
+
+        result = data['result']
+        self.info = result['info']
+        self.task_id = result['task_id']
+        self.criteria = result['criteria']
+        testcases = result['testcases']
+
+        for case, data in testcases.items():
+            self._upload_case_result(case, data)
+
+    def _upload_case_result(self, case, data):
+        try:
+            step_data = data.get('tc_data', [])[0]
+        except IndexError:
+            current_time = datetime.now()
+        else:
+            timestamp = float(step_data.get('timestamp', 0.0))
+            current_time = datetime.fromtimestamp(timestamp)
+
+        result = {
+            "project_name": "vnftest",
+            "case_name": case,
+            "description": "vnftest ci step status",
+            "step": self.info.get('deploy_step'),
+            "version": self.info.get('version'),
+            "pod_name": self.info.get('pod_name'),
+            "installer": self.info.get('installer'),
+            "build_tag": os.environ.get('BUILD_TAG'),
+            "criteria": data.get('criteria'),
+            "start_date": current_time.strftime('%Y-%m-%d %H:%M:%S'),
+            "stop_date": current_time.strftime('%Y-%m-%d %H:%M:%S'),
+            "trust_indicator": "",
+            "details": ""
+        }
+
+        try:
+            LOG.debug('Test result : %s', result)
+            res = requests.post(self.target,
+                                data=jsonutils.dump_as_bytes(result),
+                                headers=self.headers,
+                                timeout=self.timeout)
+            LOG.debug('Test result posting finished with status code'
+                      ' %d.' % res.status_code)
+        except Exception as err:
+            LOG.exception('Failed to record result data: %s', err)
diff --git a/vnftest/main.py b/vnftest/main.py
new file mode 100755
index 0000000..308867c
--- /dev/null
+++ b/vnftest/main.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tests/main.py
+""" vnftest - command line tool for managing benchmarks
+
+    Example invocation:
+    $ vnftest task start samples/ping.yaml
+
+    Servers are the same as VMs (Nova calls them servers in the API)
+
+    Many tests use a client/server architecture. A test client is configured
+    to use a specific test server e.g. using an IP address. This is true for
+    example iperf. In some cases the test server is included in the kernel
+    (ping, pktgen) and no additional software is needed on the server. In other
+    cases (iperf) a server process needs to be installed and started.
+
+    One server is required to host the test client program (such as ping or
+    iperf). In the task file this server is called host.
+
+    A server can be the _target_ of a test client (think ping destination
+    argument). A target server is optional but needed in most test steps.
+    In the task file this server is called target. This is probably the same
+    as DUT in existing terminology.
+
+    Existing terminology:
+    https://www.ietf.org/rfc/rfc1242.txt (throughput/latency)
+    https://www.ietf.org/rfc/rfc2285.txt (DUT/SUT)
+
+    New terminology:
+    NFV TST
+
+"""
+from __future__ import absolute_import
+import sys
+
+from vnftest.cmd.cli import VnftestCLI
+
+
+def main():
+    """vnftest main"""
+    VnftestCLI().main(sys.argv[1:])
+
+if __name__ == '__main__':
+    main()
diff --git a/vnftest/onap/__init__.py b/vnftest/onap/__init__.py
new file mode 100644
index 0000000..7382128
--- /dev/null
+++ b/vnftest/onap/__init__.py
@@ -0,0 +1,20 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+from __future__ import absolute_import
+import vnftest.common.utils as utils
+
+utils.import_modules_from_package("vnftest.benchmark.contexts")
+utils.import_modules_from_package("vnftest.benchmark.runners")
+utils.import_modules_from_package("vnftest.benchmark.steps")
diff --git a/vnftest/onap/contexts/__init__.py b/vnftest/onap/contexts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/contexts/__init__.py
diff --git a/vnftest/onap/contexts/base.py b/vnftest/onap/contexts/base.py
new file mode 100644
index 0000000..03c3e1f
--- /dev/null
+++ b/vnftest/onap/contexts/base.py
@@ -0,0 +1,64 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+import abc
+import six
+import vnftest.common.utils as utils
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Context(object):
+    """Class that represents a context in the logical model"""
+    list = []
+
+    @staticmethod
+    def split_name(name, sep='.'):
+        try:
+            name_iter = iter(name.split(sep))
+        except AttributeError:
+            # name is not a string
+            return None, None
+        return next(name_iter), next(name_iter, None)
+
+    def __init__(self):
+        Context.list.append(self)
+
+    @abc.abstractmethod
+    def init(self, attrs):
+        """Initiate context."""
+
+    @staticmethod
+    def get_cls(context_type):
+        """Return class of specified type."""
+        for context in utils.itersubclasses(Context):
+            if context_type == context.__context_type__:
+                return context
+        raise RuntimeError("No such context_type %s" % context_type)
+
+    @staticmethod
+    def get(context_type):
+        """Returns instance of a context for context type.
+        """
+        return Context.get_cls(context_type)()
+
+    def _delete_context(self):
+        Context.list.remove(self)
+
+    @abc.abstractmethod
+    def deploy(self):
+        """Deploy context."""
+
+    @abc.abstractmethod
+    def undeploy(self):
+        """Undeploy context."""
+        self._delete_context()
diff --git a/vnftest/onap/contexts/csar.py b/vnftest/onap/contexts/csar.py
new file mode 100644
index 0000000..8d89467
--- /dev/null
+++ b/vnftest/onap/contexts/csar.py
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+import logging
+from vnftest.onap.contexts.base import Context
+
+LOG = logging.getLogger(__name__)
+
+
+class CSARContext(Context):
+    """Class that handle sdc info"""
+
+    __context_type__ = "CSAR"
+
+    def __init__(self):
+        self.csar_name = None
+        self.csar_id = None
+        self.csar_package_location = None
+        super(CSARContext, self).__init__()
+
+    def init(self, attrs):
+        """initializes itself from the supplied arguments"""
+        self.csar_name = attrs.get("csar_name")
+        self.csar_id = attrs.get("csar_id")
+        self.csar_package_location = attrs.get("csar_package_location")
+
+    def deploy(self):
+        """no need to deploy"""
+        pass
+
+    def undeploy(self):
+        """no need to undeploy"""
+        super(CSARContext, self).undeploy()
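+
+
+# --- Illustrative usage (editor's addition, not part of the original patch) ---
+# A minimal sketch: contexts are created by type through the Context registry
+# and initialized from a task's context attributes; the values are hypothetical.
+def _example_csar_context():      # pragma: no cover
+    ctx = Context.get("CSAR")
+    ctx.init({"csar_name": "demo",
+              "csar_id": "1234",
+              "csar_package_location": "/tmp/demo.csar"})
+    ctx.deploy()
+    ctx.undeploy()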
diff --git a/vnftest/onap/contexts/dummy.py b/vnftest/onap/contexts/dummy.py
new file mode 100644
index 0000000..b61d55e
--- /dev/null
+++ b/vnftest/onap/contexts/dummy.py
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+from __future__ import absolute_import
+import logging
+
+from vnftest.onap.contexts.base import Context
+
+
+LOG = logging.getLogger(__name__)
+
+
+class DummyContext(Context):
+    """Class that handle dummy info"""
+
+    __context_type__ = "Dummy"
+
+    def __init__(self):
+        super(DummyContext, self).__init__()
+
+    def init(self, attrs):
+        pass
+
+    def deploy(self):
+        """don't need to deploy"""
+        pass
+
+    def undeploy(self):
+        """don't need to undeploy"""
+        super(DummyContext, self).undeploy()
diff --git a/vnftest/onap/core/__init__.py b/vnftest/onap/core/__init__.py
new file mode 100644
index 0000000..c204f9d
--- /dev/null
+++ b/vnftest/onap/core/__init__.py
@@ -0,0 +1,45 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/init.py
+"""
+Vnftest core.
+"""
+
+from __future__ import print_function
+
+
+class Param(object):
+    """This class converts a parameter dictionary to an object."""
+
+    def __init__(self, kwargs):
+        # list
+        self.inputfile = kwargs.get('inputfile')
+        self.task_args = kwargs.get('task-args')
+        self.task_args_file = kwargs.get('task-args-file')
+        self.keep_deploy = kwargs.get('keep-deploy')
+        self.parse_only = kwargs.get('parse-only')
+        self.output_file = kwargs.get('output-file', '/tmp/vnftest.out')
+        self.suite = kwargs.get('suite')
+        self.task_id = kwargs.get('task_id')
+        self.yaml_name = kwargs.get('yaml_name')
+
+        # list
+        self.input_file = kwargs.get('input_file')
+
+        # list
+        self.casename = kwargs.get('casename')
+
+        # list
+        self.type = kwargs.get('type')
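+        # Note (illustrative): the incoming keys follow the CLI spelling with
+        # dashes ('task-args', 'output-file') while the attributes use
+        # underscores, e.g.
+        #   Param({'inputfile': ['task.yaml'], 'task-args': '{"key": "val"}'})
+        # exposes .inputfile and .task_args (values here are hypothetical).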
diff --git a/vnftest/onap/core/plugin.py b/vnftest/onap/core/plugin.py
new file mode 100644
index 0000000..90b3a7e
--- /dev/null
+++ b/vnftest/onap/core/plugin.py
@@ -0,0 +1,175 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/plugin.py
+""" Handler for vnftest command 'plugin' """
+
+from __future__ import print_function
+from __future__ import absolute_import
+import os
+import sys
+import time
+import logging
+import pkg_resources
+import vnftest.ssh as ssh
+
+from vnftest.common.task_template import TaskTemplate
+from vnftest.common.yaml_loader import yaml_load
+
+LOG = logging.getLogger(__name__)
+
+
+class Plugin(object):
+    """Plugin commands.
+
+       Set of commands to manage plugins.
+    """
+
+    def install(self, args):
+        """Install a plugin."""
+
+        total_start_time = time.time()
+        parser = PluginParser(args.input_file[0])
+
+        plugins, deployment = parser.parse_plugin()
+        plugin_name = plugins.get("name")
+        LOG.info("Installing plugin: %s", plugin_name)
+
+        LOG.debug("Executing _install_setup()")
+        self._install_setup(plugin_name, deployment)
+
+        LOG.debug("Executing _run()")
+        self._run(plugin_name)
+
+        total_end_time = time.time()
+        LOG.info("Total finished in %d secs",
+                 total_end_time - total_start_time)
+
+        LOG.info("Plugin %s Done, exiting", plugin_name)
+
+    def remove(self, args):
+        """Remove a plugin."""
+
+        total_start_time = time.time()
+        parser = PluginParser(args.input_file[0])
+
+        plugins, deployment = parser.parse_plugin()
+        plugin_name = plugins.get("name")
+        print("Removing plugin: %s" % plugin_name)
+
+        LOG.info("Executing _remove_setup()")
+        self._remove_setup(plugin_name, deployment)
+
+        LOG.info("Executing _run()")
+        self._run(plugin_name)
+
+        total_end_time = time.time()
+        LOG.info("total finished in %d secs",
+                 total_end_time - total_start_time)
+
+        print("Done, exiting")
+
+    def _install_setup(self, plugin_name, deployment):
+        """Deployment environment setup"""
+        target_script = plugin_name + ".bash"
+        self.script = pkg_resources.resource_filename(
+            'vnftest.resources', 'scripts/install/' + target_script)
+
+        deployment_ip = deployment.get("ip", None)
+
+        if deployment_ip == "local":
+            self.client = ssh.SSH.from_node(deployment, overrides={
+                # host can't be None, fail if no JUMP_HOST_IP
+                'ip': os.environ["JUMP_HOST_IP"],
+            })
+        else:
+            self.client = ssh.SSH.from_node(deployment)
+        self.client.wait(timeout=600)
+
+        # copy script to host
+        remotepath = '~/%s.sh' % plugin_name
+
+        LOG.info("copying script to host: %s", remotepath)
+        self.client._put_file_shell(self.script, remotepath)
+
+    def _remove_setup(self, plugin_name, deployment):
+        """Deployment environment setup"""
+        target_script = plugin_name + ".bash"
+        self.script = pkg_resources.resource_filename(
+            'vnftest.resources', 'scripts/remove/' + target_script)
+
+        deployment_ip = deployment.get("ip", None)
+
+        if deployment_ip == "local":
+            self.client = ssh.SSH.from_node(deployment, overrides={
+                # host can't be None, fail if no JUMP_HOST_IP
+                'ip': os.environ["JUMP_HOST_IP"],
+            })
+        else:
+            self.client = ssh.SSH.from_node(deployment)
+        self.client.wait(timeout=600)
+
+        # copy script to host
+        remotepath = '~/%s.sh' % plugin_name
+
+        LOG.info("copying script to host: %s", remotepath)
+        self.client._put_file_shell(self.script, remotepath)
+
+    def _run(self, plugin_name):
+        """Run installation script """
+        cmd = "sudo bash %s" % plugin_name + ".sh"
+
+        LOG.info("Executing command: %s", cmd)
+        self.client.execute(cmd)
+
+
+class PluginParser(object):
+    """Parser for plugin configration files in yaml format"""
+
+    def __init__(self, path):
+        self.path = path
+
+    def parse_plugin(self):
+        """parses the plugin file and return a plugins instance
+           and a deployment instance
+        """
+
+        print("Parsing plugin config:", self.path)
+
+        try:
+            kw = {}
+            with open(self.path) as f:
+                try:
+                    input_plugin = f.read()
+                    rendered_plugin = TaskTemplate.render(input_plugin, **kw)
+                except Exception as e:
+                    print("Failed to render template:\n%(plugin)s\n%(err)s\n"
+                          % {"plugin": input_plugin, "err": e})
+                    raise e
+                print("Input plugin is:\n%s\n" % rendered_plugin)
+
+                cfg = yaml_load(rendered_plugin)
+        except IOError as ioerror:
+            sys.exit(ioerror)
+
+        self._check_schema(cfg["schema"], "plugin")
+
+        return cfg["plugins"], cfg["deployment"]
+
+    def _check_schema(self, cfg_schema, schema_type):
+        """Check if configration file is using the correct schema type"""
+
+        if cfg_schema != "vnftest:" + schema_type + ":0.1":
+            sys.exit("error: file %s has unknown schema %s" % (self.path,
+                                                               cfg_schema))
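+# Illustrative sketch of the plugin file layout this parser expects, based on
+# the keys accessed above (names and values are hypothetical):
+#   schema: "vnftest:plugin:0.1"
+#   plugins:
+#     name: sample_plugin
+#   deployment:
+#     ip: local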
diff --git a/vnftest/onap/core/report.py b/vnftest/onap/core/report.py
new file mode 100644
index 0000000..bb791dc
--- /dev/null
+++ b/vnftest/onap/core/report.py
@@ -0,0 +1,128 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/report.py
+""" Handler for vnftest command 'report' """
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import ast
+import re
+import uuid
+
+from django.conf import settings
+from django.template import Context
+from django.template import Template
+from oslo_utils import encodeutils
+from oslo_utils import uuidutils
+
+from vnftest.common import constants as consts
+from vnftest.common.html_template import template
+from vnftest.common.utils import cliargs
+
+settings.configure()
+
+
+class Report(object):
+    """Report commands.
+
+    Set of commands to manage benchmark tasks.
+    """
+
+    def __init__(self):
+        self.Timestamp = []
+        self.yaml_name = ""
+        self.task_id = ""
+
+    def _validate(self, yaml_name, task_id):
+        if re.match("^[a-z0-9_-]+$", yaml_name):
+            self.yaml_name = yaml_name
+        else:
+            raise ValueError("invalid yaml_name", yaml_name)
+
+        if uuidutils.is_uuid_like(task_id):
+            task_id = '{' + task_id + '}'
+            task_uuid = (uuid.UUID(task_id))
+            self.task_id = task_uuid
+        else:
+            raise ValueError("invalid task_id", task_id)
+
+    # def _get_fieldkeys(self):
+        # fieldkeys_cmd = "show field keys from \"%s\""
+        # fieldkeys_query = fieldkeys_cmd % (self.yaml_name)
+        # query_exec = influx.query(fieldkeys_query)
+        # if query_exec:
+        #     return query_exec
+        # else:
+        #     raise KeyError("Task ID or Test case not found..")
+
+    #def _get_tasks(self):
+        # task_cmd = "select * from \"%s\" where task_id= '%s'"
+        # task_query = task_cmd % (self.yaml_name, self.task_id)
+        # query_exec = influx.query(task_query)
+        # if query_exec:
+        #     return query_exec
+        # else:
+        #     raise KeyError("Task ID or Test case not found..")
+
+    @cliargs("task_id", type=str, help=" task id", nargs=1)
+    @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+    def generate(self, args):
+        """Start report generation."""
+        self._validate(args.yaml_name[0], args.task_id[0])
+
+        self.db_fieldkeys = self._get_fieldkeys()
+
+        self.db_task = self._get_tasks()
+
+        field_keys = []
+        temp_series = []
+        table_vals = {}
+
+        field_keys = [encodeutils.to_utf8(field['fieldKey'])
+                      for field in self.db_fieldkeys]
+
+        for key in field_keys:
+            self.Timestamp = []
+            series = {}
+            values = []
+            for task in self.db_task:
+                task_time = encodeutils.to_utf8(task['time'])
+                if not isinstance(task_time, str):
+                    task_time = str(task_time, 'utf8')
+                    key = str(key, 'utf8')
+                task_time = task_time[11:]
+                head, sep, tail = task_time.partition('.')
+                task_time = head + "." + tail[:6]
+                self.Timestamp.append(task_time)
+                if isinstance(task[key], float) is True:
+                    values.append(task[key])
+                else:
+                    values.append(ast.literal_eval(task[key]))
+            table_vals['Timestamp'] = self.Timestamp
+            table_vals[key] = values
+            series['name'] = key
+            series['data'] = values
+            temp_series.append(series)
+
+        Template_html = Template(template)
+        Context_html = Context({"series": temp_series,
+                                "Timestamp": self.Timestamp,
+                                "task_id": self.task_id,
+                                "table": table_vals})
+        with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
+            file_open.write(Template_html.render(Context_html))
+
+        print("Report generated. View /tmp/vnftest.htm")
diff --git a/vnftest/onap/core/runner.py b/vnftest/onap/core/runner.py
new file mode 100644
index 0000000..32ec6e9
--- /dev/null
+++ b/vnftest/onap/core/runner.py
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/runner.py
+""" Handler for vnftest command 'runner' """
+
+from __future__ import absolute_import
+
+import prettytable
+
+from vnftest.onap.runners.base import Runner
+
+
+class Runners(object):  # pragma: no cover
+    """Runner commands.
+
+       Set of commands to discover and display runner types.
+    """
+
+    def list_all(self, *args):
+        """List existing runner types"""
+        types = Runner.get_types()
+        runner_table = prettytable.PrettyTable(['Type', 'Description'])
+        runner_table.align = 'l'
+        for rtype in types:
+            runner_table.add_row([rtype.__execution_type__,
+                                  rtype.__doc__.split("\n")[0]])
+        print(runner_table)
+
+    def show(self, args):
+        """Show details of a specific runner type"""
+        rtype = Runner.get_cls(args.type[0])
+        print(rtype.__doc__)
diff --git a/vnftest/onap/core/step.py b/vnftest/onap/core/step.py
new file mode 100644
index 0000000..4411780
--- /dev/null
+++ b/vnftest/onap/core/step.py
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/step.py
+
+""" Handler for vnftest command 'step' """
+
+from __future__ import absolute_import
+import prettytable
+
+from vnftest.onap.steps.base import Step
+
+
+class Steps(object):    # pragma: no cover
+    """Step commands.
+
+       Set of commands to discover and display step types.
+    """
+
+    def list_all(self, *args):
+        """List existing step types"""
+        types = Step.get_types()
+        step_table = prettytable.PrettyTable(['Type', 'Description'])
+        step_table.align = 'l'
+        for step_class in types:
+            step_table.add_row([step_class.get_step_type(),
+                                step_class.get_description()])
+        print(step_table)
+
+    def show(self, args):
+        """Show details of a specific step type"""
+        stype = Step.get_cls(args.type[0])
+        print(stype.__doc__)
diff --git a/vnftest/onap/core/task.py b/vnftest/onap/core/task.py
new file mode 100644
index 0000000..2d3033a
--- /dev/null
+++ b/vnftest/onap/core/task.py
@@ -0,0 +1,605 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/task.py
+
+""" Handler for vnftest command 'task' """
+
+from __future__ import absolute_import
+from __future__ import print_function
+import sys
+import os
+from collections import OrderedDict
+
+import yaml
+import atexit
+import ipaddress
+import time
+import logging
+import uuid
+import collections
+
+from six.moves import filter
+from jinja2 import Environment
+
+from vnftest.onap.contexts.base import Context
+from vnftest.onap.contexts.csar import CSARContext
+from vnftest.onap.runners import base as base_runner
+from vnftest.onap.runners.duration import DurationRunner
+from vnftest.common.constants import CONF_FILE
+from vnftest.common.yaml_loader import yaml_load
+from vnftest.dispatcher.base import Base as DispatcherBase
+from vnftest.common.task_template import TaskTemplate
+from vnftest.common import utils
+from vnftest.common import constants
+from vnftest.common.html_template import report_template
+
+output_file_default = "/tmp/vnftest.out"
+test_cases_dir_default = "tests/onap/test_cases/"
+LOG = logging.getLogger(__name__)
+
+
+class Task(object):     # pragma: no cover
+    """Task commands.
+
+       Set of commands to manage benchmark tasks.
+    """
+
+    def __init__(self):
+        self.context = None
+        self.outputs = {}
+
+    def _set_dispatchers(self, output_config):
+        dispatchers = output_config.get('DEFAULT', {}).get('dispatcher',
+                                                           'file')
+        out_types = [s.strip() for s in dispatchers.split(',')]
+        output_config['DEFAULT']['dispatcher'] = out_types
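+        # e.g. a configured value of "file, http" becomes ['file', 'http']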
+
+    def start(self, args, **kwargs):
+        """Start a vnf step."""
+
+        atexit.register(self.atexit_handler)
+
+        task_id = getattr(args, 'task_id')
+        self.task_id = task_id if task_id else str(uuid.uuid4())
+
+        self._set_log()
+
+        try:
+            output_config = utils.parse_ini_file(CONF_FILE)
+        except Exception:
+            # all errors are ignored; fall back to an empty output config
+            output_config = {}
+
+        self._init_output_config(output_config)
+        self._set_output_config(output_config, args.output_file)
+        LOG.debug('Output configuration is: %s', output_config)
+
+        self._set_dispatchers(output_config)
+
+        # update dispatcher list
+        if 'file' in output_config['DEFAULT']['dispatcher']:
+            result = {'status': 0, 'result': {}}
+            utils.write_json_to_file(args.output_file, result)
+
+        total_start_time = time.time()
+        parser = TaskParser(args.inputfile[0])
+
+        if args.suite:
+            # 1.parse suite, return suite_params info
+            task_files, task_args, task_args_fnames = \
+                parser.parse_suite()
+        else:
+            task_files = [parser.path]
+            task_args = [args.task_args]
+            task_args_fnames = [args.task_args_file]
+
+        LOG.debug("task_files:%s, task_args:%s, task_args_fnames:%s",
+                  task_files, task_args, task_args_fnames)
+
+        if args.parse_only:
+            sys.exit(0)
+
+        testcases = {}
+        # parse task_files
+        for i in range(0, len(task_files)):
+            one_task_start_time = time.time()
+            parser.path = task_files[i]
+            steps, run_in_parallel, meet_precondition, ret_context = \
+                parser.parse_task(self.task_id, task_args[i],
+                                  task_args_fnames[i])
+
+            self.context = ret_context
+
+            if not meet_precondition:
+                LOG.info("meet_precondition is %s, please check envrionment",
+                         meet_precondition)
+                continue
+
+            case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
+            try:
+                data = self._run(steps, run_in_parallel, args.output_file)
+            except KeyboardInterrupt:
+                raise
+            except Exception:
+                LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
+                testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
+            else:
+                LOG.info('Testcase: "%s" SUCCESS!!!', case_name)
+                testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
+
+            if args.keep_deploy:
+                # keep deployment, forget about stack
+                # (hide it for exit handler)
+                self.context = None
+            else:
+                self.context.undeploy()
+                self.context = None
+            one_task_end_time = time.time()
+            LOG.info("Task %s finished in %d secs", task_files[i],
+                     one_task_end_time - one_task_start_time)
+
+        result = self._get_format_result(testcases)
+
+        self._do_output(output_config, result)
+        self._generate_reporting(result)
+
+        total_end_time = time.time()
+        LOG.info("Total finished in %d secs",
+                 total_end_time - total_start_time)
+
+        step = steps[0]
+        LOG.info("To generate report, execute command "
+                 "'vnftest report generate %(task_id)s %(tc)s'", step)
+        LOG.info("Task ALL DONE, exiting")
+        return result
+
+    def _generate_reporting(self, result):
+        env = Environment()
+        with open(constants.REPORTING_FILE, 'w') as f:
+            f.write(env.from_string(report_template).render(result))
+
+        LOG.info("Report can be found in '%s'", constants.REPORTING_FILE)
+
+    def _set_log(self):
+        log_format = '%(asctime)s %(name)s %(filename)s:%(lineno)d %(levelname)s %(message)s'
+        log_formatter = logging.Formatter(log_format)
+
+        utils.makedirs(constants.TASK_LOG_DIR)
+        log_path = os.path.join(constants.TASK_LOG_DIR, '{}.log'.format(self.task_id))
+        log_handler = logging.FileHandler(log_path)
+        log_handler.setFormatter(log_formatter)
+        log_handler.setLevel(logging.DEBUG)
+
+        logging.root.addHandler(log_handler)
+
+    def _init_output_config(self, output_config):
+        output_config.setdefault('DEFAULT', {})
+        output_config.setdefault('dispatcher_http', {})
+        output_config.setdefault('dispatcher_file', {})
+        output_config.setdefault('dispatcher_influxdb', {})
+        output_config.setdefault('nsb', {})
+
+    def _set_output_config(self, output_config, file_path):
+        try:
+            out_type = os.environ['DISPATCHER']
+        except KeyError:
+            output_config['DEFAULT'].setdefault('dispatcher', 'file')
+        else:
+            output_config['DEFAULT']['dispatcher'] = out_type
+
+        output_config['dispatcher_file']['file_path'] = file_path
+
+        try:
+            target = os.environ['TARGET']
+        except KeyError:
+            pass
+        else:
+            k = 'dispatcher_{}'.format(output_config['DEFAULT']['dispatcher'])
+            output_config[k]['target'] = target
+
+    def _get_format_result(self, testcases):
+        criteria = self._get_task_criteria(testcases)
+
+        info = {
+            'deploy_step': os.environ.get('DEPLOY_STEP', 'unknown'),
+            'installer': os.environ.get('INSTALLER_TYPE', 'unknown'),
+            'pod_name': os.environ.get('NODE_NAME', 'unknown'),
+            'version': os.environ.get('VNFTEST_BRANCH', 'unknown')
+        }
+
+        result = {
+            'status': 1,
+            'result': {
+                'criteria': criteria,
+                'task_id': self.task_id,
+                'info': info,
+                'testcases': testcases
+            }
+        }
+
+        return result
+
+    def _get_task_criteria(self, testcases):
+        any_failed = any(t.get('criteria') != 'PASS' for t in testcases.values())
+        return 'FAIL' if any_failed else 'PASS'
+
+    def _do_output(self, output_config, result):
+        dispatchers = DispatcherBase.get(output_config)
+
+        for dispatcher in dispatchers:
+            dispatcher.flush_result_data(result)
+
+    def _run(self, steps, run_in_parallel, output_file):
+        """Deploys context and calls runners"""
+        self.context.deploy()
+        background_runners = []
+
+        result = []
+        # Start all background steps
+        for step in filter(_is_background_step, steps):
+            step["runner"] = dict(type="Duration", duration=1000000000)
+            runner = self.run_one_step(step, output_file)
+            background_runners.append(runner)
+
+        runners = []
+        if run_in_parallel:
+            for step in steps:
+                if not _is_background_step(step):
+                    runner = self.run_one_step(step, output_file)
+                    runners.append(runner)
+
+            # Wait for runners to finish
+            for runner in runners:
+                status = runner_join(runner, background_runners, self.outputs, result)
+                if status != 0:
+                    raise RuntimeError(
+                        "{0} runner status {1}".format(runner.__execution_type__, status))
+                LOG.info("Runner ended, output in %s", output_file)
+        else:
+            # run serially
+            for step in steps:
+                if not _is_background_step(step):
+                    runner = self.run_one_step(step, output_file)
+                    status = runner_join(runner, background_runners, self.outputs, result)
+                    if status != 0:
+                        LOG.error('Step NO.%s: "%s" ERROR!',
+                                  steps.index(step) + 1,
+                                  step.get('type'))
+                        raise RuntimeError(
+                            "{0} runner status {1}".format(runner.__execution_type__, status))
+                    LOG.info("Runner ended, output in %s", output_file)
+
+        # Abort background runners
+        for runner in background_runners:
+            runner.abort()
+
+        # Wait for background runners to finish
+        for runner in background_runners:
+            status = runner.join(self.outputs, result)
+            if status is None:
+                # Nuke if it did not stop nicely
+                base_runner.Runner.terminate(runner)
+                runner.join(self.outputs, result)
+            base_runner.Runner.release(runner)
+
+            print("Background task ended")
+        return result
+
+    def atexit_handler(self):
+        """handler for process termination"""
+        base_runner.Runner.terminate_all()
+
+        if self.context:
+            LOG.info("Undeploying context")
+            self.context.undeploy()
+
+    def _parse_options(self, op):
+        if isinstance(op, dict):
+            return {k: self._parse_options(v) for k, v in op.items()}
+        elif isinstance(op, list):
+            return [self._parse_options(v) for v in op]
+        elif isinstance(op, str):
+            return self.outputs.get(op[1:]) if op.startswith('$') else op
+        else:
+            return op
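+        # Illustrative: with self.outputs == {'vnf_id': '42'}, an options value
+        # of '$vnf_id' resolves to '42'; dicts and lists are walked recursively
+        # (the example key and value here are hypothetical).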
+
+    def run_one_step(self, step_cfg, output_file):
+        """run one step using context"""
+        # default runner is Duration
+        if 'runner' not in step_cfg:
+            step_cfg['runner'] = dict(type="Duration", duration=1000000000)
+        runner_cfg = step_cfg['runner']
+        runner_cfg['output_filename'] = output_file
+        options = step_cfg.get('options', {})
+        step_cfg['options'] = self._parse_options(options)
+        runner = base_runner.Runner.get(runner_cfg)
+
+        LOG.info("Starting runner of type '%s'", runner_cfg["type"])
+        runner.run(step_cfg, self.context)
+        return runner
+
+
+class TaskParser(object):       # pragma: no cover
+    """Parser for task config files in yaml format"""
+
+    def __init__(self, path):
+        self.path = path
+
+    def _meet_constraint(self, task, cur_pod, cur_installer):
+        if "constraint" in task:
+            constraint = task.get('constraint', None)
+            if constraint is not None:
+                tc_fit_pod = constraint.get('pod', None)
+                tc_fit_installer = constraint.get('installer', None)
+                LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
+                         cur_pod, cur_installer, constraint)
+                if (cur_pod is None) or (tc_fit_pod and cur_pod not in tc_fit_pod):
+                    return False
+                if (cur_installer is None) or (tc_fit_installer and cur_installer
+                                               not in tc_fit_installer):
+                    return False
+        return True
+
+    def _get_task_para(self, task, cur_pod):
+        task_args = task.get('task_args', None)
+        if task_args is not None:
+            task_args = task_args.get(cur_pod, task_args.get('default'))
+        task_args_fnames = task.get('task_args_fnames', None)
+        if task_args_fnames is not None:
+            task_args_fnames = task_args_fnames.get(cur_pod, None)
+        return task_args, task_args_fnames
+
+    def parse_suite(self):
+        """parse the suite file and return a list of task config file paths
+           and lists of optional parameters if present"""
+        LOG.info("\nParsing suite file:%s", self.path)
+
+        try:
+            with open(self.path) as stream:
+                cfg = yaml_load(stream)
+        except IOError as ioerror:
+            sys.exit(ioerror)
+
+        self._check_schema(cfg["schema"], "suite")
+        LOG.info("\nStarting step:%s", cfg["name"])
+
+        test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
+        test_cases_dir = os.path.join(constants.VNFTEST_ROOT_PATH,
+                                      test_cases_dir)
+        if test_cases_dir[-1] != os.sep:
+            test_cases_dir += os.sep
+
+        cur_pod = os.environ.get('NODE_NAME', None)
+        cur_installer = os.environ.get('INSTALLER_TYPE', None)
+
+        valid_task_files = []
+        valid_task_args = []
+        valid_task_args_fnames = []
+
+        for task in cfg["test_cases"]:
+            # 1.check file_name
+            if "file_name" in task:
+                task_fname = task.get('file_name', None)
+                if task_fname is None:
+                    continue
+            else:
+                continue
+            # 2.check constraint
+            if self._meet_constraint(task, cur_pod, cur_installer):
+                valid_task_files.append(test_cases_dir + task_fname)
+            else:
+                continue
+            # 3.fetch task parameters
+            task_args, task_args_fnames = self._get_task_para(task, cur_pod)
+            valid_task_args.append(task_args)
+            valid_task_args_fnames.append(task_args_fnames)
+
+        return valid_task_files, valid_task_args, valid_task_args_fnames
+
+    def parse_task(self, task_id, task_args=None, task_args_file=None):
+        """parses the task file and return an context and step instances"""
+        LOG.info("Parsing task config: %s", self.path)
+
+        try:
+            kw = {}
+            if task_args_file:
+                with open(task_args_file) as f:
+                    kw.update(parse_task_args("task_args_file", f.read()))
+            kw.update(parse_task_args("task_args", task_args))
+        except TypeError:
+            raise TypeError()
+
+        try:
+            with open(self.path) as f:
+                try:
+                    input_task = f.read()
+                    rendered_task = TaskTemplate.render(input_task, **kw)
+                except Exception as e:
+                    LOG.exception('Failed to render template:\n%s\n', input_task)
+                    raise e
+                LOG.debug("Input task is:\n%s\n", rendered_task)
+
+                cfg = yaml_load(rendered_task)
+        except IOError as ioerror:
+            sys.exit(ioerror)
+
+        self._check_schema(cfg["schema"], "task")
+        meet_precondition = self._check_precondition(cfg)
+
+        if "context" in cfg:
+            context_cfg = cfg["context"]
+        else:
+            context_cfg = {"type": "Dummy"}
+
+        name_suffix = '-{}'.format(task_id[:8])
+        try:
+            context_cfg['name'] = '{}{}'.format(
+                context_cfg['name'], name_suffix)
+        except KeyError:
+            pass
+        # default to CSAR context
+        context_type = context_cfg.get("type", "CSAR")
+        context = Context.get(context_type)
+        context.init(context_cfg)
+
+        run_in_parallel = cfg.get("run_in_parallel", False)
+
+        # add tc and task id for influxdb extended tags
+        for step in cfg["steps"]:
+            task_name = os.path.splitext(os.path.basename(self.path))[0]
+            step["tc"] = task_name
+            step["task_id"] = task_id
+            # embed task path into step so we can load other files
+            # relative to task path
+            step["task_path"] = os.path.dirname(self.path)
+
+        # TODO we need something better here, a class that represents the file
+        return cfg["steps"], run_in_parallel, meet_precondition, context
+
+    def _check_schema(self, cfg_schema, schema_type):
+        """Check if config file is using the correct schema type"""
+
+        if cfg_schema != "vnftest:" + schema_type + ":0.1":
+            sys.exit("error: file %s has unknown schema %s" % (self.path,
+                                                               cfg_schema))
+
+    def _check_precondition(self, cfg):
+        """Check if the environment meet the precondition"""
+
+        if "precondition" in cfg:
+            precondition = cfg["precondition"]
+            installer_type = precondition.get("installer_type", None)
+            deploy_steps = precondition.get("deploy_steps", None)
+            tc_fit_pods = precondition.get("pod_name", None)
+            installer_type_env = os.environ.get('INSTALL_TYPE', None)
+            deploy_step_env = os.environ.get('DEPLOY_STEP', None)
+            pod_name_env = os.environ.get('NODE_NAME', None)
+
+            LOG.info("installer_type: %s, installer_type_env: %s",
+                     installer_type, installer_type_env)
+            LOG.info("deploy_steps: %s, deploy_step_env: %s",
+                     deploy_steps, deploy_step_env)
+            LOG.info("tc_fit_pods: %s, pod_name_env: %s",
+                     tc_fit_pods, pod_name_env)
+            if installer_type and installer_type_env:
+                if installer_type_env not in installer_type:
+                    return False
+            if deploy_steps and deploy_step_env:
+                deploy_steps_list = deploy_steps.split(',')
+                for deploy_step in deploy_steps_list:
+                    if deploy_step_env.startswith(deploy_step):
+                        return True
+                return False
+            if tc_fit_pods and pod_name_env:
+                if pod_name_env not in tc_fit_pods:
+                    return False
+        return True
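+        # Illustrative 'precondition' section, based on the keys read above
+        # (values are hypothetical):
+        #   precondition:
+        #     installer_type: sample_installer
+        #     deploy_steps: step1,step2
+        #     pod_name: pod1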
+
+
+def is_ip_addr(addr):
+    """check if string addr is an IP address"""
+    try:
+        addr = addr.get('public_ip_attr', addr.get('private_ip_attr'))
+    except AttributeError:
+        pass
+
+    try:
+        ipaddress.ip_address(addr.encode('utf-8'))
+    except ValueError:
+        return False
+    else:
+        return True
+
+
+def _is_background_step(step):
+    if "run_in_background" in step:
+        return step["run_in_background"]
+    else:
+        return False
+
+
+def parse_nodes_with_context(step_cfg):
+    """parse the 'nodes' fields in step """
+    # ensure consistency in node instantiation order
+    return OrderedDict((nodename, Context.get_server(step_cfg["nodes"][nodename]))
+                       for nodename in sorted(step_cfg["nodes"]))
+
+
+def get_networks_from_nodes(nodes):
+    """parse the 'nodes' fields in step """
+    networks = {}
+    for node in nodes.values():
+        if not node:
+            continue
+        interfaces = node.get('interfaces', {})
+        for interface in interfaces.values():
+            # vld_id is network_name
+            network_name = interface.get('network_name')
+            if not network_name:
+                continue
+            network = Context.get_network(network_name)
+            if network:
+                networks[network['name']] = network
+    return networks
+
+
+def runner_join(runner, background_runners, outputs, result):
+    """join (wait for) a runner, exit process at runner failure
+    :param background_runners:
+    :type background_runners:
+    :param outputs:
+    :type outputs: dict
+    :param result:
+    :type result: list
+    """
+    while runner.poll() is None:
+        outputs.update(runner.get_output())
+        result.extend(runner.get_result())
+        # drain all the background runner queues
+        for background in background_runners:
+            outputs.update(background.get_output())
+            result.extend(background.get_result())
+    status = runner.join(outputs, result)
+    base_runner.Runner.release(runner)
+    return status
+
+
+def print_invalid_header(source_name, args):
+    print("Invalid %(source)s passed:\n\n %(args)s\n"
+          % {"source": source_name, "args": args})
+
+
+def parse_task_args(src_name, args):
+    if isinstance(args, collections.Mapping):
+        return args
+
+    try:
+        kw = args and yaml_load(args)
+        kw = {} if kw is None else kw
+    except yaml.parser.ParserError as e:
+        print_invalid_header(src_name, args)
+        print("%(source)s has to be YAML. Details:\n\n%(err)s\n"
+              % {"source": src_name, "err": e})
+        raise TypeError()
+
+    if not isinstance(kw, dict):
+        print_invalid_header(src_name, args)
+        print("%(src)s had to be dict, actually %(src_type)s\n"
+              % {"src": src_name, "src_type": type(kw)})
+        raise TypeError()
+    return kw
\ No newline at end of file
diff --git a/vnftest/onap/core/testcase.py b/vnftest/onap/core/testcase.py
new file mode 100644
index 0000000..ef3e535
--- /dev/null
+++ b/vnftest/onap/core/testcase.py
@@ -0,0 +1,113 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/testcase.py
+
+""" Handler for vnftest command 'testcase' """
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import logging
+
+from vnftest.common.task_template import TaskTemplate
+from vnftest.common import constants as consts
+from vnftest.common.yaml_loader import yaml_load
+
+LOG = logging.getLogger(__name__)
+
+
+class Testcase(object):
+    """Testcase commands.
+
+       Set of commands to discover and display test cases.
+    """
+
+    def list_all(self, args):
+        """List existing test cases"""
+
+        testcase_files = self._get_testcase_file_list()
+        testcase_list = [self._get_record(f) for f in testcase_files]
+
+        return testcase_list
+
+    def _get_testcase_file_list(self):
+        try:
+            testcase_files = sorted(os.listdir(consts.TESTCASE_DIR))
+        except OSError:
+            LOG.exception('Failed to list dir:\n%s\n', consts.TESTCASE_DIR)
+            raise
+
+        return testcase_files
+
+    def _get_record(self, testcase_file):
+
+        file_path = os.path.join(consts.TESTCASE_DIR, testcase_file)
+        with open(file_path) as f:
+            try:
+                testcase_info = f.read()
+            except IOError:
+                LOG.exception('Failed to load test case:\n%s\n', testcase_file)
+                raise
+
+        description, installer, deploy_steps = self._parse_testcase(
+            testcase_info)
+
+        record = {
+            'Name': testcase_file.split(".")[0],
+            'Description': description,
+            'installer': installer,
+            'deploy_steps': deploy_steps
+        }
+
+        return record
+
+    def _parse_testcase(self, testcase_info):
+
+        rendered_testcase = TaskTemplate.render(testcase_info)
+        testcase_cfg = yaml_load(rendered_testcase)
+
+        test_precondition = testcase_cfg.get('precondition', {})
+        installer_type = test_precondition.get('installer_type', 'all')
+        deploy_steps = test_precondition.get('deploy_steps', 'all')
+
+        description = self._get_description(testcase_cfg)
+
+        return description, installer_type, deploy_steps
+
+    def _get_description(self, testcase_cfg):
+        try:
+            description_list = testcase_cfg['description'].split(';')
+        except KeyError:
+            return ''
+        else:
+            try:
+                return description_list[1].replace(os.linesep, '').strip()
+            except IndexError:
+                return description_list[0].replace(os.linesep, '').strip()
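+        # The 'description' value is treated as ';'-separated text; the part
+        # after the first ';' is preferred, otherwise the first part is used.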
+
+    def show(self, args):
+        """Show details of a specific test case"""
+        testcase_name = args.casename[0]
+        testcase_path = os.path.join(consts.TESTCASE_DIR,
+                                     testcase_name + ".yaml")
+        with open(testcase_path) as f:
+            try:
+                testcase_info = f.read()
+            except IOError:
+                LOG.exception('Failed to load test case:\n%s\n', testcase_path)
+                raise
+
+            print(testcase_info)
+        return True
diff --git a/vnftest/onap/core/testsuite.py b/vnftest/onap/core/testsuite.py
new file mode 100644
index 0000000..986982a
--- /dev/null
+++ b/vnftest/onap/core/testsuite.py
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/testsuite.py
+
+""" Handler for vnftest command 'testcase' """
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import logging
+
+from vnftest.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+
+
+class Testsuite(object):
+    """Testcase commands.
+
+       Set of commands to discover and display test cases.
+    """
+
+    def list_all(self, args):
+        """List existing test cases"""
+
+        testsuite_list = self._get_testsuite_file_list()
+
+        return testsuite_list
+
+    def _get_testsuite_file_list(self):
+        try:
+            testsuite_files = sorted(os.listdir(consts.TESTSUITE_DIR))
+        except OSError:
+            LOG.exception('Failed to list dir:\n%s\n', consts.TESTSUITE_DIR)
+            raise
+
+        return testsuite_files
diff --git a/vnftest/onap/runners/__init__.py b/vnftest/onap/runners/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/runners/__init__.py
diff --git a/vnftest/onap/runners/base.py b/vnftest/onap/runners/base.py
new file mode 100755
index 0000000..5170bbe
--- /dev/null
+++ b/vnftest/onap/runners/base.py
@@ -0,0 +1,250 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/base.py
+
+from __future__ import absolute_import
+import logging
+import multiprocessing
+import subprocess
+import time
+import traceback
+import importlib
+from Queue import Empty
+
+import vnftest.common.utils as utils
+from vnftest.onap.steps import base as base_step
+from vnftest.onap.steps.onap_api_call import OnapApiCall
+
+log = logging.getLogger(__name__)
+
+
+def _execute_shell_command(command):
+    """execute shell script with error handling"""
+    exitcode = 0
+    try:
+        output = subprocess.check_output(command, shell=True)
+    except Exception:
+        exitcode = -1
+        output = traceback.format_exc()
+        log.error("exec command '%s' error:\n ", command)
+        log.error(traceback.format_exc())
+
+    return exitcode, output
+
+
+def _single_action(seconds, command, queue):
+    """entrypoint for the single action process"""
+    log.debug("single action, fires after %d seconds (from now)", seconds)
+    time.sleep(seconds)
+    log.debug("single action: executing command: '%s'", command)
+    ret_code, data = _execute_shell_command(command)
+    if ret_code < 0:
+        log.error("single action error! command:%s", command)
+        queue.put({'single-action-data': data})
+        return
+    log.debug("single action data: \n%s", data)
+    queue.put({'single-action-data': data})
+
+
+def _periodic_action(interval, command, queue):
+    """entrypoint for the periodic action process"""
+    log.debug("periodic action, fires every: %d seconds", interval)
+    time_spent = 0
+    while True:
+        time.sleep(interval)
+        time_spent += interval
+        log.debug("periodic action, executing command: '%s'", command)
+        ret_code, data = _execute_shell_command(command)
+        if ret_code < 0:
+            log.error("periodic action error! command:%s", command)
+            queue.put({'periodic-action-data': data})
+            break
+        log.debug("periodic action data: \n%s", data)
+        queue.put({'periodic-action-data': data})
+
+
+class Runner(object):
+    runners = []
+
+    @staticmethod
+    def get_cls(runner_type):
+        """return class of specified type"""
+        for runner in utils.itersubclasses(Runner):
+            if runner_type == runner.__execution_type__:
+                return runner
+        raise RuntimeError("No such runner_type %s" % runner_type)
+
+    @staticmethod
+    def get_types():
+        """return a list of known runner type (class) names"""
+        types = []
+        for runner in utils.itersubclasses(Runner):
+            types.append(runner)
+        return types
+
+    @staticmethod
+    def get(runner_cfg):
+        """Returns instance of a step runner for execution type.
+        """
+        return Runner.get_cls(runner_cfg["type"])(runner_cfg)
+
+    @staticmethod
+    def release(runner):
+        """Release the runner"""
+        if runner in Runner.runners:
+            Runner.runners.remove(runner)
+
+    @staticmethod
+    def terminate(runner):
+        """Terminate the runner"""
+        if runner.process and runner.process.is_alive():
+            runner.process.terminate()
+
+    @staticmethod
+    def terminate_all():
+        """Terminate all runners (subprocesses)"""
+        log.debug("Terminating all runners", exc_info=True)
+
+        # nothing to release if an error occurred before any runner was created
+        if not Runner.runners:
+            return
+
+        for runner in Runner.runners:
+            log.debug("Terminating runner: %s", runner)
+            if runner.process:
+                runner.process.terminate()
+                runner.process.join()
+            if runner.periodic_action_process:
+                log.debug("Terminating periodic action process")
+                runner.periodic_action_process.terminate()
+                runner.periodic_action_process = None
+            Runner.release(runner)
+
+    def __init__(self, config):
+        self.config = config
+        self.periodic_action_process = None
+        self.output_queue = multiprocessing.Queue()
+        self.result_queue = multiprocessing.Queue()
+        self.process = None
+        self.aborted = multiprocessing.Event()
+        Runner.runners.append(self)
+
+    def run_post_stop_action(self):
+        """run a potentially configured post-stop action"""
+        if "post-stop-action" in self.config:
+            command = self.config["post-stop-action"]["command"]
+            log.debug("post stop action: command: '%s'", command)
+            ret_code, data = _execute_shell_command(command)
+            if ret_code < 0:
+                log.error("post action error! command:%s", command)
+                self.result_queue.put({'post-stop-action-data': data})
+                return
+            log.debug("post-stop data: \n%s", data)
+            self.result_queue.put({'post-stop-action-data': data})
+
+    def _run_step(self, cls, method_name, step_cfg, context_cfg):
+        raise NotImplementedError
+
+    def run(self, step_cfg, context_cfg):
+        step_type = step_cfg["type"]
+        class_name = base_step.Step.get(step_type)
+        path_split = class_name.split(".")
+        module_path = ".".join(path_split[:-1])
+        module = importlib.import_module(module_path)
+        cls = getattr(module, path_split[-1])
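+        # Step.get() returns a dotted path such as
+        # 'vnftest.onap.steps.onap_api_call.OnapApiCall' (the class imported
+        # above); the module is imported and the class resolved dynamically.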
+
+        self.config['object'] = class_name
+        self.aborted.clear()
+
+        # run a potentially configured pre-start action
+        if "pre-start-action" in self.config:
+            command = self.config["pre-start-action"]["command"]
+            log.debug("pre start action: command: '%s'", command)
+            ret_code, data = _execute_shell_command(command)
+            if ret_code < 0:
+                log.error("pre-start action error! command:%s", command)
+                self.result_queue.put({'pre-start-action-data': data})
+                return
+            log.debug("pre-start data: \n%s", data)
+            self.result_queue.put({'pre-start-action-data': data})
+
+        if "single-shot-action" in self.config:
+            single_action_process = multiprocessing.Process(
+                target=_single_action,
+                name="single-shot-action",
+                args=(self.config["single-shot-action"]["after"],
+                      self.config["single-shot-action"]["command"],
+                      self.result_queue))
+            single_action_process.start()
+
+        if "periodic-action" in self.config:
+            self.periodic_action_process = multiprocessing.Process(
+                target=_periodic_action,
+                name="periodic-action",
+                args=(self.config["periodic-action"]["interval"],
+                      self.config["periodic-action"]["command"],
+                      self.result_queue))
+            self.periodic_action_process.start()
+
+        self._run_step(cls, "run", step_cfg, context_cfg)
+
+    def abort(self):
+        """Abort the execution of a step"""
+        self.aborted.set()
+
+    QUEUE_JOIN_INTERVAL = 5
+
+    def poll(self, timeout=QUEUE_JOIN_INTERVAL):
+        self.process.join(timeout)
+        return self.process.exitcode
+
+    def join(self, outputs, result, interval=QUEUE_JOIN_INTERVAL):
+        while self.process.exitcode is None:
+            # drain the queue while we are running otherwise we won't terminate
+            outputs.update(self.get_output())
+            result.extend(self.get_result())
+            self.process.join(interval)
+        # drain after the process has exited
+        outputs.update(self.get_output())
+        result.extend(self.get_result())
+
+        self.process.terminate()
+        if self.periodic_action_process:
+            self.periodic_action_process.join(1)
+            self.periodic_action_process.terminate()
+            self.periodic_action_process = None
+
+        self.run_post_stop_action()
+        return self.process.exitcode
+
+    def get_output(self):
+        result = {}
+        while not self.output_queue.empty():
+            log.debug("output_queue size %s", self.output_queue.qsize())
+            try:
+                result.update(self.output_queue.get(True, 1))
+            except Empty:
+                pass
+        return result
+
+    def get_result(self):
+        result = []
+        while not self.result_queue.empty():
+            log.debug("result_queue size %s", self.result_queue.qsize())
+            try:
+                result.append(self.result_queue.get(True, 1))
+            except Empty:
+                pass
+        return result
diff --git a/vnftest/onap/runners/duration.py b/vnftest/onap/runners/duration.py
new file mode 100644
index 0000000..7e539e5
--- /dev/null
+++ b/vnftest/onap/runners/duration.py
@@ -0,0 +1,145 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/duration.py
+
+"""A runner that runs a specific time before it returns
+"""
+
+from __future__ import absolute_import
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+QUEUE_PUT_TIMEOUT = 10
+
+
+def _worker_process(queue, cls, method_name, step_cfg,
+                    context_cfg, aborted, output_queue):
+
+    sequence = 1
+
+    runner_cfg = step_cfg['runner']
+
+    interval = runner_cfg.get("interval", 1)
+    duration = runner_cfg.get("duration", 60)
+    LOG.info("Worker START, duration is %ds", duration)
+    LOG.debug("class is %s", cls)
+
+    runner_cfg['runner_id'] = os.getpid()
+
+    step = cls(step_cfg, context_cfg)
+    step.setup()
+    method = getattr(step, method_name)
+
+    sla_action = None
+    if "sla" in step_cfg:
+        sla_action = step_cfg["sla"].get("action", "assert")
+
+    start = time.time()
+    timeout = start + duration
+    while True:
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+        data = {}
+        errors = ""
+
+        try:
+            result = method(data)
+        except AssertionError as assertion:
+            # SLA validation failed in step, determine what to do now
+            if sla_action == "assert":
+                raise
+            elif sla_action == "monitor":
+                LOG.warning("SLA validation failed: %s", assertion.args)
+                errors = assertion.args
+        # catch all exceptions because with multiprocessing we can have un-picklable exception
+        # problems  https://bugs.python.org/issue9400
+        except Exception:
+            errors = traceback.format_exc()
+            LOG.exception("")
+        else:
+            if result:
+                # add timeout for put so we don't block test
+                # if we do timeout we don't care about dropping individual KPIs
+                output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
+
+        time.sleep(interval)
+
+        step_output = {
+            'timestamp': time.time(),
+            'sequence': sequence,
+            'data': data,
+            'errors': errors
+        }
+
+        queue.put(step_output, True, QUEUE_PUT_TIMEOUT)
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+        sequence += 1
+
+        if (errors and sla_action is None) or time.time() > timeout or aborted.is_set():
+            LOG.info("Worker END")
+            break
+
+    try:
+        step.teardown()
+    except Exception:
+        # catch any exception in teardown and convert to simple exception
+        # never pass exceptions back to multiprocessing, because some exceptions can
+        # be unpicklable
+        # https://bugs.python.org/issue9400
+        LOG.exception("")
+        raise SystemExit(1)
+
+    LOG.debug("queue.qsize() = %s", queue.qsize())
+    LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+
+
+class DurationRunner(base.Runner):
+    """Run a scenario for a certain amount of time
+
+If the scenario ends before the time has elapsed, it will be started again.
+
+  Parameters
+    duration - amount of time the scenario will be run for
+        type:    int
+        unit:    seconds
+        default: 1 sec
+    interval - time to wait between each scenario invocation
+        type:    int
+        unit:    seconds
+        default: 1 sec
+    """
+    __execution_type__ = 'Duration'
+
+    def _run_step(self, cls, method, step_cfg, context_cfg):
+        name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+        self.process = multiprocessing.Process(
+            name=name,
+            target=_worker_process,
+            args=(self.result_queue, cls, method, step_cfg,
+                  context_cfg, self.aborted, self.output_queue))
+        self.process.start()
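
As a rough illustration of the parameters documented on DurationRunner, the step configuration handed to _run_step could look like the dict below. The 'Dummy' step type is taken from the dummy step added later in this change; the test-case file format that produces this dict is not part of this section.

    step_cfg = {
        'type': 'Dummy',                  # step class to run (see steps/dummy/dummy.py)
        'runner': {
            'type': 'Duration',
            'duration': 60,               # keep invoking the step for 60 seconds
            'interval': 1,                # sleep 1 second between invocations
        },
        # optional: log SLA failures instead of raising
        # 'sla': {'action': 'monitor'},
    }
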
diff --git a/vnftest/onap/runners/dynamictp.py b/vnftest/onap/runners/dynamictp.py
new file mode 100755
index 0000000..5ea0910
--- /dev/null
+++ b/vnftest/onap/runners/dynamictp.py
@@ -0,0 +1,179 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/step/runners/dynamictp.py
+
+"""A runner that searches for the max throughput with binary search
+"""
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, step_cfg,
+                    context_cfg, aborted):  # pragma: no cover
+
+    runner_cfg = step_cfg['runner']
+    iterations = runner_cfg.get("iterations", 1)
+    interval = runner_cfg.get("interval", 1)
+    run_step = runner_cfg.get("run_step", "setup,run,teardown")
+    delta = runner_cfg.get("delta", 1000)
+    options_cfg = step_cfg['options']
+    initial_rate = options_cfg.get("pps", 1000000)
+    LOG.info("worker START, class %s", cls)
+
+    runner_cfg['runner_id'] = os.getpid()
+
+    step = cls(step_cfg, context_cfg)
+    if "setup" in run_step:
+        step.setup()
+
+    method = getattr(step, method_name)
+
+    queue.put({'runner_id': runner_cfg['runner_id'],
+               'step_cfg': step_cfg,
+               'context_cfg': context_cfg})
+
+    if "run" in run_step:
+        iterator = 0
+        search_max = initial_rate
+        search_min = 0
+        while iterator < iterations:
+            search_min = int(search_min / 2)
+            step_cfg['options']['pps'] = search_max
+            search_max_found = False
+            max_throughput_found = False
+            sequence = 0
+
+            last_min_data = {'packets_per_second': 0}
+
+            while True:
+                sequence += 1
+
+                data = {}
+                errors = ""
+                too_high = False
+
+                LOG.debug("sequence: %s search_min: %s search_max: %s",
+                          sequence, search_min, search_max)
+
+                try:
+                    method(data)
+                except AssertionError as assertion:
+                    LOG.warning("SLA validation failed: %s", assertion.args)
+                    too_high = True
+                except Exception as e:
+                    errors = traceback.format_exc()
+                    LOG.exception(e)
+
+                actual_pps = data['packets_per_second']
+
+                if too_high:
+                    search_max = actual_pps
+
+                    if not search_max_found:
+                        search_max_found = True
+                else:
+                    last_min_data = data
+                    search_min = actual_pps
+
+                    # Check if the actual rate is well below the asked rate
+                    if step_cfg['options']['pps'] > actual_pps * 1.5:
+                        search_max = actual_pps
+                        LOG.debug("Sender reached max tput: %s", search_max)
+                    elif not search_max_found:
+                        search_max = int(actual_pps * 1.5)
+
+                if ((search_max - search_min) < delta) or \
+                   (search_max <= search_min) or (10 <= sequence):
+                    if last_min_data['packets_per_second'] > 0:
+                        data = last_min_data
+
+                    step_output = {
+                        'timestamp': time.time(),
+                        'sequence': sequence,
+                        'data': data,
+                        'errors': errors
+                    }
+
+                    record = {
+                        'runner_id': runner_cfg['runner_id'],
+                        'step': step_output
+                    }
+
+                    queue.put(record)
+                    max_throughput_found = True
+
+                if errors or aborted.is_set() or max_throughput_found:
+                    LOG.info("worker END")
+                    break
+
+                if not search_max_found:
+                    step_cfg['options']['pps'] = search_max
+                else:
+                    step_cfg['options']['pps'] = \
+                        (search_max - search_min) / 2 + search_min
+
+                time.sleep(interval)
+
+            iterator += 1
+            LOG.debug("iterator: %s iterations: %s", iterator, iterations)
+
+    if "teardown" in run_step:
+        try:
+            step.teardown()
+        except Exception:
+            # catch any exception in teardown and convert to simple exception
+            # never pass exceptions back to multiprocessing, because some exceptions can
+            # be unpicklable
+            # https://bugs.python.org/issue9400
+            LOG.exception("")
+            raise SystemExit(1)
+
+    LOG.debug("queue.qsize() = %s", queue.qsize())
+
+
+class IterationRunner(base.Runner):
+    """Run a step to find the max throughput
+
+If the step ends before the time has elapsed, it will be started again.
+
+  Parameters
+    interval - time to wait between each step invocation
+        type:    int
+        unit:    seconds
+        default: 1 sec
+    delta - stop condition for the search.
+        type:	 int
+        unit:	 pps
+        default: 1000 pps
+    """
+    __execution_type__ = 'Dynamictp'
+
+    def _run_step(self, cls, method, step_cfg, context_cfg):
+        name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+        self.process = multiprocessing.Process(
+            name=name,
+            target=_worker_process,
+            args=(self.result_queue, cls, method, step_cfg,
+                  context_cfg, self.aborted))
+        self.process.start()
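
To make the control flow of _worker_process easier to follow, here is a condensed sketch of just the binary search over the packet rate, leaving out the ceiling expansion, the sender-limit check and the 10-sequence cap; run_once() is a hypothetical stand-in that invokes the step at a given rate and returns whether the SLA failed plus the measured packets per second, and initial_rate/delta come from the configuration as in the worker above.

    search_min, search_max = 0, initial_rate     # pps bounds, ceiling from options['pps']
    search_max_found = False
    while (search_max - search_min) >= delta and search_max > search_min:
        rate = search_max if not search_max_found \
            else (search_max - search_min) / 2 + search_min
        sla_failed, measured_pps = run_once(rate)    # hypothetical helper
        if sla_failed:
            search_max = measured_pps                # too fast: lower the ceiling
            search_max_found = True
        else:
            search_min = measured_pps                # sustained: raise the floor
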
diff --git a/vnftest/onap/runners/iteration.py b/vnftest/onap/runners/iteration.py
new file mode 100644
index 0000000..9bac92e
--- /dev/null
+++ b/vnftest/onap/runners/iteration.py
@@ -0,0 +1,160 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/iteration.py
+
+"""A runner that runs a configurable number of times before it returns
+"""
+
+from __future__ import absolute_import
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+QUEUE_PUT_TIMEOUT = 10
+
+
+def _worker_process(queue, cls, method_name, step_cfg,
+                    context_cfg, aborted, output_queue):
+
+    sequence = 1
+
+    runner_cfg = step_cfg['runner']
+
+    interval = runner_cfg.get("interval", 1)
+    iterations = runner_cfg.get("iterations", 1)
+    run_step = runner_cfg.get("run_step", "setup,run,teardown")
+
+    delta = runner_cfg.get("delta", 2)
+    LOG.info("worker START, iterations %d times, class %s", iterations, cls)
+
+    runner_cfg['runner_id'] = os.getpid()
+
+    step = cls(step_cfg, context_cfg)
+    if "setup" in run_step:
+        step.setup()
+
+    method = getattr(step, method_name)
+
+    sla_action = None
+    if "sla" in step_cfg:
+        sla_action = step_cfg["sla"].get("action", "assert")
+    if "run" in run_step:
+        while True:
+
+            LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+                      {"runner": runner_cfg["runner_id"],
+                       "sequence": sequence})
+
+            data = {}
+            errors = ""
+
+            try:
+                result = method(data)
+            except AssertionError as assertion:
+                # SLA validation failed in step, determine what to do now
+                if sla_action == "assert":
+                    raise
+                elif sla_action == "monitor":
+                    LOG.warning("SLA validation failed: %s", assertion.args)
+                    errors = assertion.args
+                elif sla_action == "rate-control":
+                    try:
+                        step_cfg['options']['rate']
+                    except KeyError:
+                        step_cfg.setdefault('options', {})
+                        step_cfg['options']['rate'] = 100
+
+                    step_cfg['options']['rate'] -= delta
+                    sequence = 1
+                    continue
+            except Exception:
+                errors = traceback.format_exc()
+                LOG.exception("")
+            else:
+                if result:
+                    # add timeout for put so we don't block test
+                    # if we do timeout we don't care about dropping individual KPIs
+                    output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
+
+            time.sleep(interval)
+
+            step_output = {
+                'timestamp': time.time(),
+                'sequence': sequence,
+                'data': data,
+                'errors': errors
+            }
+
+            queue.put(step_output, True, QUEUE_PUT_TIMEOUT)
+
+            LOG.debug("runner=%(runner)s seq=%(sequence)s END",
+                      {"runner": runner_cfg["runner_id"],
+                       "sequence": sequence})
+
+            sequence += 1
+
+            if (errors and sla_action is None) or \
+                    (sequence > iterations or aborted.is_set()):
+                LOG.info("worker END")
+                break
+    if "teardown" in run_step:
+        try:
+            step.teardown()
+        except Exception:
+            # catch any exception in teardown and convert to simple exception
+            # never pass exceptions back to multiprocessing, because some exceptions can
+            # be unpicklable
+            # https://bugs.python.org/issue9400
+            LOG.exception("")
+            raise SystemExit(1)
+
+    LOG.debug("queue.qsize() = %s", queue.qsize())
+    LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+
+
+class IterationRunner(base.Runner):
+    """Run a step for a configurable number of times
+
+The step is invoked repeatedly until the configured number of iterations has completed or the run is aborted.
+
+  Parameters
+    iterations - number of times the step will be run
+        type:    int
+        unit:    na
+        default: 1
+    interval - time to wait between each step invocation
+        type:    int
+        unit:    seconds
+        default: 1 sec
+    """
+    __execution_type__ = 'Iteration'
+
+    def _run_step(self, cls, method, step_cfg, context_cfg):
+        name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+        self.process = multiprocessing.Process(
+            name=name,
+            target=_worker_process,
+            args=(self.result_queue, cls, method, step_cfg,
+                  context_cfg, self.aborted, self.output_queue))
+        self.process.start()
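
A sketch of a step configuration using the Iteration runner, including the 'rate-control' SLA action that is specific to this runner: on an SLA failure it initialises options['rate'] to 100 if unset, lowers it by delta and restarts the iteration count. The 'Dummy' step type comes from this change; the other values are illustrative.

    step_cfg = {
        'type': 'Dummy',
        'runner': {
            'type': 'Iteration',
            'iterations': 10,                    # invoke the step 10 times
            'interval': 1,                       # seconds between invocations
            'run_step': 'setup,run,teardown',    # which phases to execute
            'delta': 2,                          # rate decrement used by rate-control
        },
        'sla': {'action': 'rate-control'},       # lower options['rate'] on SLA failure
    }
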
diff --git a/vnftest/onap/runners/search.py b/vnftest/onap/runners/search.py
new file mode 100644
index 0000000..d5bd417
--- /dev/null
+++ b/vnftest/onap/runners/search.py
@@ -0,0 +1,180 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/search.py
+
+"""A runner that runs a specific time before it returns
+"""
+
+from __future__ import absolute_import
+
+import logging
+import multiprocessing
+import time
+import traceback
+from contextlib import contextmanager
+from itertools import takewhile
+
+import os
+from collections import Mapping
+from six.moves import zip
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+class SearchRunnerHelper(object):
+
+    def __init__(self, cls, method_name, step_cfg, context_cfg, aborted):
+        super(SearchRunnerHelper, self).__init__()
+        self.cls = cls
+        self.method_name = method_name
+        self.step_cfg = step_cfg
+        self.context_cfg = context_cfg
+        self.aborted = aborted
+        self.runner_cfg = step_cfg['runner']
+        self.run_step = self.runner_cfg.get("run_step", "setup,run,teardown")
+        self.timeout = self.runner_cfg.get("timeout", 60)
+        self.interval = self.runner_cfg.get("interval", 1)
+        self.step = None
+        self.method = None
+
+    def __call__(self, *args, **kwargs):
+        if self.method is None:
+            raise RuntimeError("step method is not initialized; use get_step_instance() first")
+        return self.method(*args, **kwargs)
+
+    @contextmanager
+    def get_step_instance(self):
+        self.step = self.cls(self.step_cfg, self.context_cfg)
+
+        if 'setup' in self.run_step:
+            self.step.setup()
+
+        self.method = getattr(self.step, self.method_name)
+        LOG.info("worker START, timeout %d sec, class %s", self.timeout, self.cls)
+        try:
+            yield self
+        finally:
+            if 'teardown' in self.run_step:
+                self.step.teardown()
+
+    def is_not_done(self):
+        if 'run' not in self.run_step:
+            raise StopIteration
+
+        max_time = time.time() + self.timeout
+
+        abort_iter = iter(self.aborted.is_set, True)
+        time_iter = takewhile(lambda t_now: t_now <= max_time, iter(time.time, -1))
+
+        for seq, _ in enumerate(zip(abort_iter, time_iter), 1):
+            yield seq
+            time.sleep(self.interval)
+
+
+class SearchRunner(base.Runner):
+    """Run a step for a certain amount of time
+
+If the step ends before the time has elapsed, it will be started again.
+
+  Parameters
+    timeout - amount of time the step will be run for
+        type:    int
+        unit:    seconds
+        default: 60 sec
+    interval - time to wait between each step invocation
+        type:    int
+        unit:    seconds
+        default: 1 sec
+    """
+    __execution_type__ = 'Search'
+
+    def __init__(self, config):
+        super(SearchRunner, self).__init__(config)
+        self.runner_cfg = None
+        self.runner_id = None
+        self.sla_action = None
+        self.worker_helper = None
+
+    def _worker_run_once(self, sequence):
+        LOG.debug("runner=%s seq=%s START", self.runner_id, sequence)
+
+        data = {}
+        errors = ""
+
+        try:
+            self.worker_helper(data)
+        except AssertionError as assertion:
+            # SLA validation failed in step, determine what to do now
+            if self.sla_action == "assert":
+                raise
+            elif self.sla_action == "monitor":
+                LOG.warning("SLA validation failed: %s", assertion.args)
+                errors = assertion.args
+        except Exception as e:
+            errors = traceback.format_exc()
+            LOG.exception(e)
+
+        record = {
+            'runner_id': self.runner_id,
+            'step': {
+                'timestamp': time.time(),
+                'sequence': sequence,
+                'data': data,
+                'errors': errors,
+            },
+        }
+
+        self.result_queue.put(record)
+
+        LOG.debug("runner=%s seq=%s END", self.runner_id, sequence)
+
+        # Have to search through all the VNF KPIs
+        kpi_done = any(kpi.get('done') for kpi in data.values() if isinstance(kpi, Mapping))
+
+        return kpi_done or (errors and self.sla_action is None)
+
+    def _worker_run(self, cls, method_name, step_cfg, context_cfg):
+        self.runner_cfg = step_cfg['runner']
+        self.runner_id = self.runner_cfg['runner_id'] = os.getpid()
+
+        self.worker_helper = SearchRunnerHelper(cls, method_name, step_cfg,
+                                                context_cfg, self.aborted)
+
+        try:
+            self.sla_action = step_cfg['sla'].get('action', 'assert')
+        except KeyError:
+            self.sla_action = None
+
+        self.result_queue.put({
+            'runner_id': self.runner_id,
+            'step_cfg': step_cfg,
+            'context_cfg': context_cfg
+        })
+
+        with self.worker_helper.get_step_instance():
+            for sequence in self.worker_helper.is_not_done():
+                if self._worker_run_once(sequence):
+                    LOG.info("worker END")
+                    break
+
+    def _run_step(self, cls, method, step_cfg, context_cfg):
+        name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+        self.process = multiprocessing.Process(
+            name=name,
+            target=self._worker_run,
+            args=(cls, method, step_cfg, context_cfg))
+        self.process.start()
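
The Search runner stops as soon as any KPI mapping in the step's result data carries a truthy 'done' flag (see _worker_run_once). A hypothetical step run() method could signal completion like this; the KPI name and values are purely illustrative.

    def run(self, result):
        # ... perform one measurement ...
        result['throughput'] = {
            'value': 12345,      # illustrative KPI value
            'done': True,        # a truthy 'done' ends the search loop
        }
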
diff --git a/vnftest/onap/runners/sequence.py b/vnftest/onap/runners/sequence.py
new file mode 100644
index 0000000..b341495
--- /dev/null
+++ b/vnftest/onap/runners/sequence.py
@@ -0,0 +1,149 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/sequence.py
+
+"""A runner that every run changes a specified input value to the step.
+The input value in the sequence is specified in a list in the input file.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, step_cfg,
+                    context_cfg, aborted, output_queue):
+
+    sequence = 1
+
+    runner_cfg = step_cfg['runner']
+
+    interval = runner_cfg.get("interval", 1)
+    arg_name = runner_cfg.get('step_option_name')
+    sequence_values = runner_cfg.get('sequence')
+
+    if 'options' not in step_cfg:
+        step_cfg['options'] = {}
+
+    options = step_cfg['options']
+
+    runner_cfg['runner_id'] = os.getpid()
+
+    LOG.info("worker START, sequence_values(%s, %s), class %s",
+             arg_name, sequence_values, cls)
+
+    step = cls(step_cfg, context_cfg)
+    step.setup()
+    method = getattr(step, method_name)
+
+    sla_action = None
+    if "sla" in step_cfg:
+        sla_action = step_cfg["sla"].get("action", "assert")
+
+    for value in sequence_values:
+        options[arg_name] = value
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+        data = {}
+        errors = ""
+
+        try:
+            result = method(data)
+        except AssertionError as assertion:
+            # SLA validation failed in step, determine what to do now
+            if sla_action == "assert":
+                raise
+            elif sla_action == "monitor":
+                LOG.warning("SLA validation failed: %s", assertion.args)
+                errors = assertion.args
+        except Exception as e:
+            errors = traceback.format_exc()
+            LOG.exception(e)
+        else:
+            if result:
+                output_queue.put(result)
+
+        time.sleep(interval)
+
+        step_output = {
+            'timestamp': time.time(),
+            'sequence': sequence,
+            'data': data,
+            'errors': errors
+        }
+
+        queue.put(step_output)
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+        sequence += 1
+
+        if (errors and sla_action is None) or aborted.is_set():
+            break
+
+    try:
+        step.teardown()
+    except Exception:
+        # catch any exception in teardown and convert to simple exception
+        # never pass exceptions back to multiprocessing, because some exceptions can
+        # be unpicklable
+        # https://bugs.python.org/issue9400
+        LOG.exception("")
+        raise SystemExit(1)
+    LOG.info("worker END")
+    LOG.debug("queue.qsize() = %s", queue.qsize())
+    LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+
+
+class SequenceRunner(base.Runner):
+    """Run a step by changing an input value defined in a list
+
+  Parameters
+    interval - time to wait between each step invocation
+        type:    int
+        unit:    seconds
+        default: 1 sec
+    step_option_name - name of the option that is set to the next sequence value on each invocation
+        type:    string
+        unit:    na
+        default: none
+    sequence - list of values, one used per step invocation
+        type:    [int]
+        unit:    na
+        default: none
+    """
+
+    __execution_type__ = 'Sequence'
+
+    def _run_step(self, cls, method, step_cfg, context_cfg):
+        name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+        self.process = multiprocessing.Process(
+            name=name,
+            target=_worker_process,
+            args=(self.result_queue, cls, method, step_cfg,
+                  context_cfg, self.aborted, self.output_queue))
+        self.process.start()
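
A sketch of a step configuration for the Sequence runner; on each invocation options['rate'] (an illustrative option name) is set to the next value in the list.

    step_cfg = {
        'type': 'Dummy',
        'runner': {
            'type': 'Sequence',
            'interval': 1,
            'step_option_name': 'rate',      # option written before every invocation
            'sequence': [10, 20, 50, 100],   # one step invocation per value
        },
    }
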
diff --git a/vnftest/onap/steps/__init__.py b/vnftest/onap/steps/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/steps/__init__.py
diff --git a/vnftest/onap/steps/base.py b/vnftest/onap/steps/base.py
new file mode 100644
index 0000000..d5c606a
--- /dev/null
+++ b/vnftest/onap/steps/base.py
@@ -0,0 +1,89 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/steps/base.py
+
+""" Step base class
+"""
+
+from __future__ import absolute_import
+import vnftest.common.utils as utils
+
+
+class Step(object):
+
+    def setup(self):
+        """ default impl for step setup """
+        pass
+
+    def run(self, args):
+        """ catcher for not implemented run methods in subclasses """
+        raise RuntimeError("run method not implemented")
+
+    def teardown(self):
+        """ default impl for step teardown """
+        pass
+
+    @staticmethod
+    def get_types():
+        """return a list of known runner type (class) names"""
+        steps = []
+        for step in utils.itersubclasses(Step):
+            steps.append(step)
+        return steps
+
+    @staticmethod
+    def get_cls(step_type):
+        """return class of specified type"""
+        for step in utils.itersubclasses(Step):
+            if step_type == step.__step_type__:
+                return step
+
+        raise RuntimeError("No such step type %s" % step_type)
+
+    @staticmethod
+    def get(step_type):
+        """Returns instance of a step runner for execution type.
+        """
+        for step in utils.itersubclasses(Step):
+            if step_type == step.__step_type__:
+                return step.__module__ + "." + step.__name__
+
+        raise RuntimeError("No such step type %s" % step_type)
+
+    @classmethod
+    def get_step_type(cls):
+        """Return a string with the step type, if defined"""
+        return str(getattr(cls, '__step_type__', None))
+
+    @classmethod
+    def get_description(cls):
+        """Return a single line string with the class description
+
+        This function will retrieve the class docstring and return the first
+        line, or 'None' if it's empty.
+        """
+        return cls.__doc__.splitlines()[0] if cls.__doc__ else str(None)
+
+    def _push_to_outputs(self, keys, values):
+        return dict(zip(keys, values))
+
+    def _change_obj_to_dict(self, obj):
+        dic = {}
+        for k, v in vars(obj).items():
+            try:
+                vars(v)
+            except TypeError:
+                dic[k] = v
+        return dic
diff --git a/vnftest/onap/steps/dummy/__init__.py b/vnftest/onap/steps/dummy/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/steps/dummy/__init__.py
diff --git a/vnftest/onap/steps/dummy/dummy.py b/vnftest/onap/steps/dummy/dummy.py
new file mode 100644
index 0000000..27e9a32
--- /dev/null
+++ b/vnftest/onap/steps/dummy/dummy.py
@@ -0,0 +1,42 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+from __future__ import absolute_import
+import logging
+
+from vnftest.onap.steps import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Dummy(base.Step):
+    """Execute Dummy echo
+    """
+    __step_type__ = "Dummy"
+
+    def __init__(self, step_cfg, context_cfg):
+        self.step_cfg = step_cfg
+        self.context_cfg = context_cfg
+        self.setup_done = False
+
+    def setup(self):
+        """step setup"""
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the benchmark"""
+        if not self.setup_done:
+            self.setup()
+
+        result["hello"] = "vnftest"
+        LOG.info("Dummy echo hello vnftest!")
diff --git a/vnftest/onap/steps/onap_api_call.py b/vnftest/onap/steps/onap_api_call.py
new file mode 100644
index 0000000..ecb2ce3
--- /dev/null
+++ b/vnftest/onap/steps/onap_api_call.py
@@ -0,0 +1,145 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+from __future__ import absolute_import
+
+import logging
+import time
+import os
+import yaml
+import copy
+from vnftest.onap.steps import base
+from vnftest.common import rest_client
+from vnftest.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+
+
+class OnapApiCall(base.Step):
+
+    __step_type__ = "OnapApiCall"
+
+    def __init__(self, step_cfg, context_cfg):
+        self.step_cfg = step_cfg
+        self.context_cfg = context_cfg
+        self.input = None
+        self.output = None
+        self.rest_def_file = None
+        self.setup_done = False
+        self.curr_path = os.path.dirname(os.path.abspath(__file__))
+
+    def setup(self):
+        options = self.step_cfg['options']
+        self.rest_def_file = options.get("file")
+        self.input = options.get("input")
+        self.output = options.get("output")
+        self.setup_done = True
+
+    def run(self, args):
+        if not self.setup_done:
+            self.setup()
+        params = copy.deepcopy(consts.component_constants)
+        for input_parameter in self.input:
+            param_name = input_parameter['parameter_name']
+            param_value = input_parameter['value']
+            params[param_name] = param_value
+        result = self.execute_operation(params)
+        result_body = result['body']
+        for output_parameter in self.output:
+            param_name = output_parameter['parameter_name']
+            param_path = output_parameter['path']
+            path_list = param_path.split("|")
+            param_value = result_body
+            for path_element in path_list:
+                param_value = param_value[path_element]
+            self.context_cfg[param_name] = param_value
+
+    def execute_operation(self, params, attempt=0):
+        try:
+            return self.execute_operation_impl(params)
+        except Exception as e:
+            LOG.info(str(e))
+            if attempt < 2:
+                time.sleep(15)
+                LOG.info("############# retry operation ##########")
+                attempt = attempt + 1
+                return self.execute_operation(params, attempt)
+            else:
+                raise e
+
+    def execute_operation_impl(self, params):
+        input_yaml = self.rest_def_file
+        LOG.info("########## processing " + input_yaml + "##########")
+        yaml_path = os.path.join(self.curr_path, input_yaml)
+        with open(yaml_path) as info:
+            operation = yaml.load(info)
+        operation = self.format(operation, params)
+        url = operation['url']
+        headers = operation['headers']
+        body = {}
+        if 'body' in operation:
+            body = operation['body']
+        LOG.info(url)
+        LOG.info(headers)
+        LOG.info(body)
+        if 'file' in operation:
+            file_path = operation['file']
+            LOG.info(file_path)
+            files = {'upload': open(file_path)}
+            result = rest_client.upload_file(url, headers, files, LOG)
+        else:
+            result = rest_client.call(url,
+                                      operation['method'],
+                                      headers,
+                                      body,
+                                      LOG)
+        if result['return_code'] >= 300:
+            raise RuntimeError(
+                "Operation failed. return_code:{}, message:{}".format(result['return_code'], result['body']))
+        LOG.info("Results: " + str(result))
+        return result
+
+    def format(self, d, params):
+        # recursively substitute {param} placeholders in dicts, lists and strings
+        ret = None
+        if isinstance(d, dict):
+            ret = {}
+            for k, v in d.iteritems():
+                if isinstance(v, basestring):
+                    v = self.format_string(v, params)
+                else:
+                    v = self.format(v, params)
+                ret[k] = v
+        if isinstance(d, list):
+            ret = []
+            for v in d:
+                if isinstance(v, basestring):
+                    v = self.format_string(v, params)
+                else:
+                    v = self.format(v, params)
+                ret.append(v)
+        if isinstance(d, basestring):
+            ret = self.format_string(d, params)
+        return ret
+
+    def format_string(self, st, params):
+        try:
+            return st.format(**params)
+        except Exception as e:
+            # a missing parameter raises KeyError; default it to an empty
+            # string and retry the substitution once
+            missing_param = str(e).replace("'", "")
+            LOG.info("missing parameter: %s, defaulting to empty string", missing_param)
+            params[missing_param] = ""
+            return st.format(**params)
+
+
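
A sketch of the options block consumed by OnapApiCall.setup()/run(): 'file' is resolved relative to this module, 'input' entries are merged into the formatting parameters used to fill placeholders such as {vendor_name} in the YAML, and each 'output' entry walks the JSON response body along a '|'-separated path and stores the value in the context. The output path and the vendor name below are illustrative, not taken from a real SDC response.

    step_cfg = {
        'type': 'OnapApiCall',
        'options': {
            'file': 'onboard/create_vlm.yaml',
            'input': [
                {'parameter_name': 'vendor_name', 'value': 'example-vendor'},
            ],
            'output': [
                {'parameter_name': 'vlm_id', 'path': 'value|itemId'},
            ],
        },
    }
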
diff --git a/vnftest/onap/steps/onboard/__init__.py b/vnftest/onap/steps/onboard/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/steps/onboard/__init__.py
diff --git a/vnftest/onap/steps/onboard/create_vlm.yaml b/vnftest/onap/steps/onboard/create_vlm.yaml
new file mode 100644
index 0000000..dce110a
--- /dev/null
+++ b/vnftest/onap/steps/onboard/create_vlm.yaml
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+---
+method: "POST"
+url: "http://{sdc_ip}:{sdc_port}/onboarding-api/v1.0/vendor-license-models"
+headers: {
+        "Content-Type": "application/json",
+        "Authorization": "Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA==",
+        "USER_ID": "{sdc_designer_user}",
+        "Accept": "application/json"
+        }
+body: {
+        "vendorName": "{vendor_name}",
+        "description": "vlm via dovetail",
+        "iconRef": "icon"
+      }
\ No newline at end of file
diff --git a/vnftest/ssh.py b/vnftest/ssh.py
new file mode 100644
index 0000000..cca0c2c
--- /dev/null
+++ b/vnftest/ssh.py
@@ -0,0 +1,497 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+#  Licensed under the ApacheLicense, Version2.0 (the"License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of rally/rally/common/sshutils.py
+
+"""High level ssh library.
+
+Usage examples:
+
+Execute command and get output:
+
+    ssh = sshclient.SSH("root", "example.com", port=33)
+    status, stdout, stderr = ssh.execute("ps ax")
+    if status:
+        raise Exception("Command failed with non-zero status.")
+    print(stdout.splitlines())
+
+Execute command with huge output:
+
+    class PseudoFile(io.RawIOBase):
+        def write(chunk):
+            if "error" in chunk:
+                email_admin(chunk)
+
+    ssh = SSH("root", "example.com")
+    with PseudoFile() as p:
+        ssh.run("tail -f /var/log/syslog", stdout=p, timeout=False)
+
+Execute local script on remote side:
+
+    ssh = sshclient.SSH("user", "example.com")
+
+    with open("~/myscript.sh", "r") as stdin_file:
+        status, out, err = ssh.execute('/bin/sh -s "arg1" "arg2"',
+                                       stdin=stdin_file)
+
+Upload file:
+
+    ssh = SSH("user", "example.com")
+    # use rb for binary files
+    with open("/store/file.gz", "rb") as stdin_file:
+        ssh.run("cat > ~/upload/file.gz", stdin=stdin_file)
+
+Eventlet:
+
+    eventlet.monkey_patch(select=True, time=True)
+    or
+    eventlet.monkey_patch()
+    or
+    sshclient = eventlet.import_patched("vnftest.ssh")
+
+"""
+from __future__ import absolute_import
+import os
+import io
+import select
+import socket
+import time
+import re
+
+import logging
+
+import paramiko
+from chainmap import ChainMap
+from oslo_utils import encodeutils
+from scp import SCPClient
+import six
+from vnftest.common.utils import try_int
+
+
+def convert_key_to_str(key):
+    if not isinstance(key, (paramiko.RSAKey, paramiko.DSSKey)):
+        return key
+    k = io.StringIO()
+    key.write_private_key(k)
+    return k.getvalue()
+
+
+class SSHError(Exception):
+    pass
+
+
+class SSHTimeout(SSHError):
+    pass
+
+
+class SSH(object):
+    """Represent ssh connection."""
+
+    SSH_PORT = paramiko.config.SSH_PORT
+
+    @staticmethod
+    def gen_keys(key_filename, bit_count=2048):
+        rsa_key = paramiko.RSAKey.generate(bits=bit_count, progress_func=None)
+        rsa_key.write_private_key_file(key_filename)
+        print("Writing %s ..." % key_filename)
+        with open('.'.join([key_filename, "pub"]), "w") as pubkey_file:
+            pubkey_file.write(rsa_key.get_name())
+            pubkey_file.write(' ')
+            pubkey_file.write(rsa_key.get_base64())
+            pubkey_file.write('\n')
+
+    @staticmethod
+    def get_class():
+        # must return static class name, anything else refers to the calling class
+        # i.e. the subclass, not the superclass
+        return SSH
+
+    def __init__(self, user, host, port=None, pkey=None,
+                 key_filename=None, password=None, name=None):
+        """Initialize SSH client.
+
+        :param user: ssh username
+        :param host: hostname or ip address of remote ssh server
+        :param port: remote ssh port
+        :param pkey: RSA or DSS private key string or file object
+        :param key_filename: private key filename
+        :param password: password
+        """
+        self.name = name
+        if name:
+            self.log = logging.getLogger(__name__ + '.' + self.name)
+        else:
+            self.log = logging.getLogger(__name__)
+
+        self.user = user
+        self.host = host
+        # everybody wants to debug this in the caller, do it here instead
+        self.log.debug("user:%s host:%s", user, host)
+
+        # we may get text port from YAML, convert to int
+        self.port = try_int(port, self.SSH_PORT)
+        self.pkey = self._get_pkey(pkey) if pkey else None
+        self.password = password
+        self.key_filename = key_filename
+        self._client = False
+        # paramiko loglevel debug will output ssh protocol debug
+        # we don't ever really want that unless we are debugging paramiko
+        # ssh issues
+        if os.environ.get("PARAMIKO_DEBUG", "").lower() == "true":
+            logging.getLogger("paramiko").setLevel(logging.DEBUG)
+        else:
+            logging.getLogger("paramiko").setLevel(logging.WARN)
+
+    @classmethod
+    def args_from_node(cls, node, overrides=None, defaults=None):
+        if overrides is None:
+            overrides = {}
+        if defaults is None:
+            defaults = {}
+        params = ChainMap(overrides, node, defaults)
+        return {
+            'user': params['user'],
+            'host': params['ip'],
+            'port': params.get('ssh_port', cls.SSH_PORT),
+            'pkey': params.get('pkey'),
+            'key_filename': params.get('key_filename'),
+            'password': params.get('password'),
+            'name': params.get('name'),
+        }
+
+    @classmethod
+    def from_node(cls, node, overrides=None, defaults=None):
+        return cls(**cls.args_from_node(node, overrides, defaults))
+
+    def _get_pkey(self, key):
+        if isinstance(key, six.string_types):
+            key = six.moves.StringIO(key)
+        errors = []
+        for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
+            try:
+                return key_class.from_private_key(key)
+            except paramiko.SSHException as e:
+                errors.append(e)
+        raise SSHError("Invalid pkey: %s" % (errors))
+
+    @property
+    def is_connected(self):
+        return bool(self._client)
+
+    def _get_client(self):
+        if self.is_connected:
+            return self._client
+        try:
+            self._client = paramiko.SSHClient()
+            self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            self._client.connect(self.host, username=self.user,
+                                 port=self.port, pkey=self.pkey,
+                                 key_filename=self.key_filename,
+                                 password=self.password,
+                                 allow_agent=False, look_for_keys=False,
+                                 timeout=1)
+            return self._client
+        except Exception as e:
+            message = ("Exception %(exception_type)s was raised "
+                       "during connect. Exception value is: %(exception)r")
+            self._client = False
+            raise SSHError(message % {"exception": e,
+                                      "exception_type": type(e)})
+
+    def _make_dict(self):
+        return {
+            'user': self.user,
+            'host': self.host,
+            'port': self.port,
+            'pkey': self.pkey,
+            'key_filename': self.key_filename,
+            'password': self.password,
+            'name': self.name,
+        }
+
+    def copy(self):
+        return self.get_class()(**self._make_dict())
+
+    def close(self):
+        if self._client:
+            self._client.close()
+            self._client = False
+
+    def run(self, cmd, stdin=None, stdout=None, stderr=None,
+            raise_on_error=True, timeout=3600,
+            keep_stdin_open=False, pty=False):
+        """Execute specified command on the server.
+
+        :param cmd:             Command to be executed.
+        :type cmd:              str
+        :param stdin:           Open file or string to pass to stdin.
+        :param stdout:          Open file to connect to stdout.
+        :param stderr:          Open file to connect to stderr.
+        :param raise_on_error:  If False then the exit code will be returned. If True
+                                then an exception will be raised on a non-zero exit code.
+        :param timeout:         Timeout in seconds for command execution.
+                                Default 1 hour. No timeout if set to 0.
+        :param keep_stdin_open: don't close stdin on empty reads
+        :type keep_stdin_open:  bool
+        :param pty:             Request a pseudo terminal for this connection.
+                                This allows passing control characters.
+                                Default False.
+        :type pty:              bool
+        """
+
+        client = self._get_client()
+
+        if isinstance(stdin, six.string_types):
+            stdin = six.moves.StringIO(stdin)
+
+        return self._run(client, cmd, stdin=stdin, stdout=stdout,
+                         stderr=stderr, raise_on_error=raise_on_error,
+                         timeout=timeout,
+                         keep_stdin_open=keep_stdin_open, pty=pty)
+
+    def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
+             raise_on_error=True, timeout=3600,
+             keep_stdin_open=False, pty=False):
+
+        transport = client.get_transport()
+        session = transport.open_session()
+        if pty:
+            session.get_pty()
+        session.exec_command(cmd)
+        start_time = time.time()
+
+        # encode on transmit, decode on receive
+        data_to_send = encodeutils.safe_encode("", incoming='utf-8')
+        stderr_data = None
+
+        # If we have data to be sent to stdin then `select' should also
+        # check for stdin availability.
+        if stdin and not stdin.closed:
+            writes = [session]
+        else:
+            writes = []
+
+        while True:
+            # Block until data can be read/write.
+            r, w, e = select.select([session], writes, [session], 1)
+
+            if session.recv_ready():
+                data = encodeutils.safe_decode(session.recv(4096), 'utf-8')
+                self.log.debug("stdout: %r", data)
+                if stdout is not None:
+                    stdout.write(data)
+                continue
+
+            if session.recv_stderr_ready():
+                stderr_data = encodeutils.safe_decode(
+                    session.recv_stderr(4096), 'utf-8')
+                self.log.debug("stderr: %r", stderr_data)
+                if stderr is not None:
+                    stderr.write(stderr_data)
+                continue
+
+            if session.send_ready():
+                if stdin is not None and not stdin.closed:
+                    if not data_to_send:
+                        stdin_txt = stdin.read(4096)
+                        if stdin_txt is None:
+                            stdin_txt = ''
+                        data_to_send = encodeutils.safe_encode(
+                            stdin_txt, incoming='utf-8')
+                        if not data_to_send:
+                            # we may need to keep stdin open
+                            if not keep_stdin_open:
+                                stdin.close()
+                                session.shutdown_write()
+                                writes = []
+                    if data_to_send:
+                        sent_bytes = session.send(data_to_send)
+                        # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+                        data_to_send = data_to_send[sent_bytes:]
+
+            if session.exit_status_ready():
+                break
+
+            if timeout and (time.time() - timeout) > start_time:
+                args = {"cmd": cmd, "host": self.host}
+                raise SSHTimeout("Timeout executing command "
+                                 "'%(cmd)s' on host %(host)s" % args)
+            if e:
+                raise SSHError("Socket error.")
+
+        exit_status = session.recv_exit_status()
+        if exit_status != 0 and raise_on_error:
+            fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
+            details = fmt % {"cmd": cmd, "status": exit_status}
+            if stderr_data:
+                details += " Last stderr data: '%s'." % stderr_data
+            raise SSHError(details)
+        return exit_status
+
+    def execute(self, cmd, stdin=None, timeout=3600):
+        """Execute the specified command on the server.
+
+        :param cmd:     Command to be executed.
+        :param stdin:   Open file to be sent on process stdin.
+        :param timeout: Timeout for execution of the command.
+
+        :returns: tuple (exit_status, stdout, stderr)
+        """
+        stdout = six.moves.StringIO()
+        stderr = six.moves.StringIO()
+
+        exit_status = self.run(cmd, stderr=stderr,
+                               stdout=stdout, stdin=stdin,
+                               timeout=timeout, raise_on_error=False)
+        stdout.seek(0)
+        stderr.seek(0)
+        return exit_status, stdout.read(), stderr.read()
+
+    def wait(self, timeout=120, interval=1):
+        """Wait for the host will be available via ssh."""
+        start_time = time.time()
+        while True:
+            try:
+                return self.execute("uname")
+            except (socket.error, SSHError) as e:
+                self.log.debug("Ssh is still unavailable: %r", e)
+                time.sleep(interval)
+            if time.time() > (start_time + timeout):
+                raise SSHTimeout("Timeout waiting for '%s'", self.host)
+
+    def put(self, files, remote_path=b'.', recursive=False):
+        client = self._get_client()
+
+        with SCPClient(client.get_transport()) as scp:
+            scp.put(files, remote_path, recursive)
+
+    def get(self, remote_path, local_path='/tmp/', recursive=True):
+        client = self._get_client()
+
+        with SCPClient(client.get_transport()) as scp:
+            scp.get(remote_path, local_path, recursive)
+
+    # keep shell running in the background, e.g. screen
+    def send_command(self, command):
+        client = self._get_client()
+        client.exec_command(command, get_pty=True)
+
+    def _put_file_sftp(self, localpath, remotepath, mode=None):
+        client = self._get_client()
+
+        with client.open_sftp() as sftp:
+            sftp.put(localpath, remotepath)
+            if mode is None:
+                mode = 0o777 & os.stat(localpath).st_mode
+            sftp.chmod(remotepath, mode)
+
+    TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")
+
+    def _put_file_shell(self, localpath, remotepath, mode=None):
+        # quote to stop word splitting
+        tilde, remotepath = self.TILDE_EXPANSIONS_RE.match(remotepath).groups()
+        if not tilde:
+            tilde = ''
+        cmd = ['cat > %s"%s"' % (tilde, remotepath)]
+        if mode is not None:
+            # use -- so no options
+            cmd.append('chmod -- 0%o %s"%s"' % (mode, tilde, remotepath))
+
+        with open(localpath, "rb") as localfile:
+            # only chmod on successful cat
+            self.run("&& ".join(cmd), stdin=localfile)
+
+    def put_file(self, localpath, remotepath, mode=None):
+        """Copy specified local file to the server.
+
+        :param localpath:   Local filename.
+        :param remotepath:  Remote filename.
+        :param mode:        Permissions to set after upload
+        """
+        try:
+            self._put_file_sftp(localpath, remotepath, mode=mode)
+        except (paramiko.SSHException, socket.error):
+            self._put_file_shell(localpath, remotepath, mode=mode)
+
+    def put_file_obj(self, file_obj, remotepath, mode=None):
+        client = self._get_client()
+
+        with client.open_sftp() as sftp:
+            sftp.putfo(file_obj, remotepath)
+            if mode is not None:
+                sftp.chmod(remotepath, mode)
+
+    def get_file_obj(self, remotepath, file_obj):
+        client = self._get_client()
+
+        with client.open_sftp() as sftp:
+            sftp.getfo(remotepath, file_obj)
+
+
+class AutoConnectSSH(SSH):
+
+    # always wait or we will get OpenStack SSH errors
+    def __init__(self, user, host, port=None, pkey=None,
+                 key_filename=None, password=None, name=None, wait=True):
+        super(AutoConnectSSH, self).__init__(user, host, port, pkey, key_filename, password, name)
+        self._wait = wait
+
+    def _make_dict(self):
+        data = super(AutoConnectSSH, self)._make_dict()
+        data.update({
+            'wait': self._wait
+        })
+        return data
+
+    def _connect(self):
+        if not self.is_connected:
+            self._get_client()
+            if self._wait:
+                self.wait()
+
+    def drop_connection(self):
+        """ Don't close anything, just force creation of a new client """
+        self._client = False
+
+    def execute(self, cmd, stdin=None, timeout=3600):
+        self._connect()
+        return super(AutoConnectSSH, self).execute(cmd, stdin, timeout)
+
+    def run(self, cmd, stdin=None, stdout=None, stderr=None,
+            raise_on_error=True, timeout=3600,
+            keep_stdin_open=False, pty=False):
+        self._connect()
+        return super(AutoConnectSSH, self).run(cmd, stdin, stdout, stderr, raise_on_error,
+                                               timeout, keep_stdin_open, pty)
+
+    def put(self, files, remote_path=b'.', recursive=False):
+        self._connect()
+        return super(AutoConnectSSH, self).put(files, remote_path, recursive)
+
+    def put_file(self, local_path, remote_path, mode=None):
+        self._connect()
+        return super(AutoConnectSSH, self).put_file(local_path, remote_path, mode)
+
+    def put_file_obj(self, file_obj, remote_path, mode=None):
+        self._connect()
+        return super(AutoConnectSSH, self).put_file_obj(file_obj, remote_path, mode)
+
+    def get_file_obj(self, remote_path, file_obj):
+        self._connect()
+        return super(AutoConnectSSH, self).get_file_obj(remote_path, file_obj)
+
+    @staticmethod
+    def get_class():
+        # must return static class name, anything else refers to the calling class
+        # i.e. the subclass, not the superclass
+        return AutoConnectSSH
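
A minimal usage sketch for the node-dictionary helpers above; the node values are placeholders, not taken from any real deployment.

    node = {
        'name': 'vnf-host',
        'user': 'ubuntu',
        'ip': '10.0.0.5',
        'ssh_port': 22,
        'key_filename': '/root/.ssh/id_rsa',
    }
    ssh = AutoConnectSSH.from_node(node)    # connects (and waits) on first use
    status, stdout, stderr = ssh.execute("uname -a")
    ssh.close()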