Merge "Modify the CSIT for vfc-svnfm-huawei"
diff --git a/bootstrap/vagrant-onap/Vagrantfile b/bootstrap/vagrant-onap/Vagrantfile
index 8417cc9..adc73ca 100644
--- a/bootstrap/vagrant-onap/Vagrantfile
+++ b/bootstrap/vagrant-onap/Vagrantfile
@@ -1,8 +1,9 @@
 # -*- mode: ruby -*-
 # vi: set ft=ruby :
 
-conf = {
-# Generic parameters used across all ONAP components
+
+configuration = {
+  # Generic parameters used across all ONAP components
   'public_net_id'       => '00000000-0000-0000-0000-000000000000',
   'key_name'            => 'ecomp_key',
   'pub_key'             => '',
@@ -32,379 +33,387 @@
   'enable_oparent'      => 'True'
 }
 
+
+box = {
+  :virtualbox => 'ubuntu/trusty64',
+  :libvirt => 'sputnik13/trusty64',
+  :openstack => nil
+}
+
+
+nodes = [
+    { 
+    :name   => "aai",
+    :ips    => ['10.252.0.6', "192.168.50.6"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["aai"]
+  },
+  { 
+    :name   => "all-in-one",
+    :ips    => ['10.252.0.3', "192.168.50.3"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 12 * 1024,
+    :groups => ["all-in-one"],
+    :flavor => 'm1.xlarge',
+    :args   => ['mr', 'sdc', 'aai', 'mso', 'robot', 'vid', 'sdnc', 'portal', 'dcae', 'policy', 'appc', 'vfc', 'ccsdk'],
+  },
+  { 
+    :name   => "appc",
+    :ips    => ['10.252.0.14', "192.168.50.14"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["appc"],
+  },
+  { 
+    :name   => "ccsdk",
+    :ips    => ['10.252.0.14', "192.168.50.17"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["ccsdk"],
+  },
+  { 
+    :name   => "dcae",
+    :ips    => ['10.252.0.12', "192.168.50.12"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["dcae"],
+  },
+  { 
+    :name   => "dns",
+    :ips    => ['10.252.0.3', "192.168.50.3"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 1 * 1024,
+    :groups => ["individual"],
+    :flavor => 'm1.small',
+    :args   => [" "]
+  },
+  { 
+    :name   => "message-router",
+    :ips    => ['10.252.0.4', "192.168.50.4"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["mr"],
+  },
+  { 
+    :name   => "mso",
+    :ips    => ['10.252.0.7', "192.168.50.7"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["mso"],
+  },
+  { 
+    :name   => "multicloud",
+    :ips    => ['10.252.0.16', "192.168.50.16"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["multicloud"],
+  },
+  { 
+    :name   => "policy",
+    :ips    => ['10.252.0.13', "192.168.50.13"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["policy"],
+  },
+  { 
+    :name   => "portal",
+    :ips    => ['10.252.0.11', "192.168.50.11"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["portal"],
+  },
+  { 
+    :name   => "robot",
+    :ips    => ['10.252.0.8', "192.168.50.8"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["robot"],
+  },
+  { 
+    :name   => "sdc",
+    :ips    => ['10.252.0.5', "192.168.50.5"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 8 * 1024,
+    :groups => ["individual"],
+    :args   => ["sdc"],
+  },
+  { 
+    :name   => "sdnc",
+    :ips    => ['10.252.0.10', "192.168.50.10"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ["sdnc"],    
+  },
+  { 
+    :name   => "testing",
+    :ips    => ['10.252.0.3', "192.168.50.3"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["testing"],
+    :flavor => 'm1.small',
+    :args   => [""],
+  },
+  { 
+    :name   => "vfc",
+    :ips    => ['10.252.0.15', "192.168.50.15"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ['vfc'],
+  },
+  
+  { 
+    :name   => "vid",
+    :ips    => ['10.252.0.9', "192.168.50.9"],
+    :macs   => [],
+    :cpus   => 2,
+    :cpu    => "50",
+    :ram    => 4 * 1024,
+    :groups => ["individual"],
+    :args   => ['vid'],
+  }, 
+]
+
+
+run_path = 'vagrant_utils/postinstall.sh'
+
+sdc_volume='vol1-sdc-data.vdi'
+
 Vagrant.require_version ">= 1.8.6"
 
-# Determine the OS for the host computer
-module OS
-    def OS.windows?
-        (/cygwin|mswin|mingw|bccwin|wince|emx/ =~ RUBY_PLATFORM) != nil
-    end
-
-    def OS.mac?
-        (/darwin/ =~ RUBY_PLATFORM) != nil
-    end
-
-    def OS.unix?
-        !OS.windows?
-    end
-
-    def OS.linux?
-        OS.unix? and not OS.mac?
-    end
-end
-
-if OS.windows?
-    puts "Vagrant launched from windows. This configuration has not fully tested."
-end
-
 # Determine the provider used
 provider = (ENV['VAGRANT_DEFAULT_PROVIDER'] || :virtualbox).to_sym
-puts "Using #{provider} provider"
+puts "[INFO] Provider: #{provider} "
+
 
 vd_conf = ENV.fetch('VD_CONF', 'etc/settings.yaml')
 if File.exist?(vd_conf)
   require 'yaml'
   user_conf = YAML.load_file(vd_conf)
-  conf.update(user_conf)
+  configuration.update(user_conf)
 end
 
+#Set network interface
+is_windows = Gem.win_platform?
+if is_windows
+    net_interface = 'VirtualBox Host-Only Ethernet Adapter #2'
+else
+    net_interface = 'vboxnet0'
+end
+puts "[INFO] Net interface: #{net_interface}"
+
+
+# If a machine name argument is given, use it; otherwise fall back to the DEPLOY_MODE environment variable (default: 'individual')
+requested_machine = ARGV[1]
+
 deploy_mode = ENV.fetch('DEPLOY_MODE', 'individual')
-sdc_volume='vol1-sdc-data.vdi'
+if requested_machine != nil
+    if requested_machine.include?("all-in-one") || requested_machine.include?("testing")
+        deploy_mode = requested_machine
+    end
+end
+
+# For 'status' and 'destroy' commands, skip deploy-mode filtering so they apply to all machines
+if ARGV[0] == 'status' || ARGV[0] == 'destroy'
+    deploy_mode = 'NA'
+end
+
+puts "[INFO] Deploy Mode:  #{deploy_mode}"
+
+# Filter the nodes list according to the selected deploy mode (all-in-one, individual or testing)
+case deploy_mode
+    when 'all-in-one'
+        nodes.select! do |node|
+            if node[:name].include?("all-in-one")
+              true if node[:name]
+            end
+        end
+
+    when 'individual'
+        nodes.select! do |node|
+            if node[:groups][0].include?("individual")
+              true if node[:name]
+               
+            end
+        end
+
+    when 'testing'
+        nodes.select! do |node|
+            if node[:name].include?("testing")
+              true if node[:name]
+            end
+        end
+end
 
 Vagrant.configure("2") do |config|
 
-  if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil and ENV['no_proxy'] != nil
-    if not Vagrant.has_plugin?('vagrant-proxyconf')
-      system 'vagrant plugin install vagrant-proxyconf'
-      raise 'vagrant-proxyconf was installed but it requires to execute again'
+  # PROXY definitions
+    if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil and ENV['no_proxy'] != nil
+      if not Vagrant.has_plugin?('vagrant-proxyconf')
+        system 'vagrant plugin install vagrant-proxyconf'
+        raise 'vagrant-proxyconf was installed but it requires to execute again'
+      end
+      config.proxy.http     = ENV['http_proxy']
+      config.proxy.https    = ENV['https_proxy']
+      config.proxy.no_proxy = ENV['no_proxy']
     end
-    config.proxy.http     = ENV['http_proxy']
-    config.proxy.https    = ENV['https_proxy']
-    config.proxy.no_proxy = ENV['no_proxy']
-  end
 
-  if Vagrant.has_plugin?('vagrant-vbguest')
-    puts 'vagrant-vbguest auto_update feature will be disable to avoid sharing conflicts'
-    config.vbguest.auto_update = false
-  end
-
-  config.vm.box = 'ubuntu/trusty64'
-  if provider == :libvirt
-    config.vm.box = 'sputnik13/trusty64'
-    if not Vagrant.has_plugin?('vagrant-libvirt')
-      system 'vagrant plugin install vagrant-libvirt'
-      raise 'vagrant-libvirt was installed but it requires to execute again'
+    if Vagrant.has_plugin?('vagrant-vbguest')
+      puts 'vagrant-vbguest auto_update feature will be disable to avoid sharing conflicts'
+      config.vbguest.auto_update = false
     end
-  end
-  if provider == :openstack
-    config.vm.box = nil
-    config.ssh.username = 'ubuntu'
-    if not Vagrant.has_plugin?('vagrant-openstack-provider')
-      system 'vagrant plugin install vagrant-openstack-provider'
-      raise 'vagrant-openstack-provider was installed but it requires to execute again'
+
+    if provider == :libvirt
+      if not Vagrant.has_plugin?('vagrant-libvirt')
+        system 'vagrant plugin install vagrant-libvirt'
+        raise 'vagrant-libvirt was installed but it requires to execute again'
+      end
     end
-  end
-  #config.vm.provision "docker"
-  config.vm.synced_folder './opt', '/opt/', create: true
-  config.vm.synced_folder './lib', '/var/onap/', create: true
-  config.vm.synced_folder '~/.m2', '/root/.m2/', create: true
+    if provider == :openstack
+      config.ssh.username = 'ubuntu'
+      if not Vagrant.has_plugin?('vagrant-openstack-provider')
+        system 'vagrant plugin install vagrant-openstack-provider'
+        raise 'vagrant-openstack-provider was installed but it requires to execute again'
+      end
+    end
 
-  config.vm.provider :virtualbox do |v|
-    v.customize ["modifyvm", :id, "--memory", 4 * 1024]
-  end
-  config.vm.provider :libvirt do |v|
-    v.memory = 4 * 1024
-    v.nested = true
-  end
-  config.vm.provider :openstack do |v|
+    nodes.each do |node|
+      config.vm.define node[:name] do |nodeconfig|
 
-    v.openstack_auth_url               = ENV.fetch('OS_AUTH_URL', '')
-    v.tenant_name                      = ENV.fetch('OS_TENANT_NAME', '')
-    v.username                         = ENV.fetch('OS_USERNAME', '')
-    v.password                         = ENV.fetch('OS_PASSWORD', '')
-    v.region                           = ENV.fetch('OS_REGION_NAME', '')
-    v.identity_api_version             = ENV.fetch('OS_IDENTITY_API_VERSION', '')
-    v.domain_name                      = ENV.fetch('OS_PROJECT_DOMAIN_ID', '')
-    v.project_name                     = ENV.fetch('OS_PROJECT_NAME', '')
-
-    v.floating_ip_pool                 = ENV.fetch('OS_FLOATING_IP_POOL', '')
-    v.floating_ip_pool_always_allocate = (ENV['OS_FLOATING_IP_ALWAYS_ALLOCATE'] == 'true')
-    v.image                            = ENV.fetch('OS_IMAGE', '')
-    v.security_groups                  = [ENV.fetch('OS_SEC_GROUP', '')]
-    v.flavor                           = 'm1.medium'
-    v.networks                         = ENV.fetch('OS_NETWORK', '')
-  end
-
-  case deploy_mode
-
-  when 'all-in-one'
-
-    config.vm.define :all_in_one do |all_in_one|
-      all_in_one.vm.hostname = 'all-in-one'
-      all_in_one.vm.network :private_network, ip: '192.168.50.3'
-      all_in_one.vm.provider "virtualbox" do |v|
-        v.customize ["modifyvm", :id, "--memory", 12 * 1024]
-        unless File.exist?(sdc_volume)
-           v.customize ['createhd', '--filename', sdc_volume, '--size', 20 * 1024]
+        # Common Settings:
+        
+        nodeconfig.vm.provider "virtualbox" do |vbox|
+          vbox.customize ['modifyvm', :id, '--nictype1', 'virtio']
+          vbox.customize ['modifyvm', :id, '--audio', 'none']
+          vbox.customize ['modifyvm', :id, '--vram', '1']
+          vbox.customize ['modifyvm', :id, "--cpuhotplug", "off"]
+          vbox.customize ['modifyvm', :id, "--cpuexecutioncap", node[:cpu]]
+          vbox.customize ['modifyvm', :id, "--cpus", node[:cpus]]  
+          vbox.customize ["modifyvm", :id, "--memory", node[:ram]]
         end
-        v.customize ['storageattach', :id, '--storagectl', 'SATAController', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', sdc_volume]
-      end
-      all_in_one.vm.provider "libvirt" do |v|
-        v.memory = 12 * 1024
-        v.nested = true
-        v.storage :file, path: sdc_volume, bus: 'sata', device: 'vdb', size: '2G'
-      end
-      all_in_one.vm.provider "openstack" do |v|
-        v.server_name = 'all-in-one'
-        v.flavor = 'm1.xlarge'
-      end
-      all_in_one.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['mr', 'sdc', 'aai', 'mso', 'robot', 'vid', 'sdnc', 'portal', 'dcae', 'policy', 'appc', 'vfc', 'ccsdk']
-        s.env = conf
-      end
-    end
-
-  when 'individual'
-
-    config.vm.define :dns do |dns|
-      dns.vm.hostname = 'dns'
-      dns.vm.network :private_network, ip: '192.168.50.3'
-      dns.vm.provider "virtualbox" do |v|
-        v.customize ["modifyvm", :id, "--memory", 1 * 1024]
-      end
-      dns.vm.provider "libvirt" do |v|
-        v.memory = 1 * 1024
-        v.nested = true
-      end
-      dns.vm.provider "openstack" do |v|
-        v.server_name = 'dns'
-        v.flavor = 'm1.small'
-      end
-      dns.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.env = conf
-      end 
-    end
-
-    config.vm.define :mr do |mr|
-      mr.vm.hostname = 'message-router'
-      mr.vm.network :private_network, ip: '192.168.50.4'
-      mr.vm.provider "openstack" do |v|
-        v.server_name = 'message-router'
-      end
-      mr.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['mr']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :sdc do |sdc|
-      sdc.vm.hostname = 'sdc'
-      sdc.vm.network :private_network, ip: '192.168.50.5'
-      sdc.vm.provider "virtualbox" do |v|
-        unless File.exist?(sdc_volume)
-           v.customize ['createhd', '--filename', sdc_volume, '--size', 20 * 1024]
+        
+        nodeconfig.vm.provider "libvirt" do |lbox|
+          lbox.memory = node[:ram]
+          lbox.nested = true
         end
-        v.customize ['storageattach', :id, '--storagectl', 'SATAController', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', sdc_volume]
-      end
-      sdc.vm.provider "libvirt" do |v|
-        v.storage :file, path: sdc_volume, bus: 'sata', device: 'vdb', size: '2G'
-      end
-      sdc.vm.provider "openstack" do |v|
-        v.server_name = 'sdc'
-      end
-      sdc.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['sdc']
-        s.env = conf
-      end
-    end
+        
+        nodeconfig.vm.provider :openstack do |obox|
+          obox.openstack_auth_url               = ENV.fetch('OS_AUTH_URL', '')
+          obox.tenant_name                      = ENV.fetch('OS_TENANT_NAME', '')
+          obox.username                         = ENV.fetch('OS_USERNAME', '')
+          obox.password                         = ENV.fetch('OS_PASSWORD', '')
+          obox.region                           = ENV.fetch('OS_REGION_NAME', '')
+          obox.identity_api_version             = ENV.fetch('OS_IDENTITY_API_VERSION', '')
+          obox.domain_name                      = ENV.fetch('OS_PROJECT_DOMAIN_ID', '')
+          obox.project_name                     = ENV.fetch('OS_PROJECT_NAME', '')
+          obox.floating_ip_pool                 = ENV.fetch('OS_FLOATING_IP_POOL', '')
+          obox.floating_ip_pool_always_allocate = (ENV['OS_FLOATING_IP_ALWAYS_ALLOCATE'] == 'true')
+          obox.image                            = ENV.fetch('OS_IMAGE', '')
+          obox.security_groups                  = [ENV.fetch('OS_SEC_GROUP', '')]
+          obox.networks                         = ENV.fetch('OS_NETWORK', '')
+          obox.flavor                           = node[:flavor]
+          obox.server_name                      = node[:name]
 
-    config.vm.define :aai do |aai|
-      aai.vm.hostname = 'aai'
-      aai.vm.network :private_network, ip: '192.168.50.6'
-      aai.vm.provider "openstack" do |v|
-        v.server_name = 'aai'
-      end
-      aai.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['aai']
-        s.env = conf
-      end 
-    end
+        end
 
-    config.vm.define :mso do |mso|
-      mso.vm.hostname = 'mso'
-      mso.vm.network :private_network, ip: '192.168.50.7'
-      mso.vm.provider "openstack" do |v|
-        v.server_name = 'mso'
-      end
-      mso.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['mso']
-        s.env = conf
-      end 
-    end
+        # Set Box type
+        nodeconfig.vm.box = box[provider]
+        
+        # Set Node name
+        nodeconfig.vm.hostname = node[:name]
+        
+        # Set Sync Folder
+        nodeconfig.vm.synced_folder ".", "/vagrant", disabled: true
+        nodeconfig.vm.synced_folder './opt', '/opt/', create: true
+        nodeconfig.vm.synced_folder './lib', '/var/onap/', create: true
+        if !is_windows
+          nodeconfig.vm.synced_folder '~/.m2', '/root/.m2/', create: true
+        end
+        # Set Network 
+        nodeconfig.vm.network :private_network, ip: node[:ips][1]
+
+        # Specific settings:
+        
+        #Set Storage (For SDC or All-in-one)
+        if node[:name].include?("all-in-one") || node[:name].include?("sdc")
+          nodeconfig.vm.provider "virtualbox" do |v|
+              unless File.exist?(sdc_volume)
+                  v.customize ['createhd', '--filename', sdc_volume, '--size', 20 * 1024]
+              end
+              v.customize ['storageattach', :id, '--storagectl', 'SATAController', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', sdc_volume]
+            end
+            
+            nodeconfig.vm.provider "libvirt" do |v|
+                v.storage :file, path: sdc_volume, bus: 'sata', device: 'vdb', size: '2G'
+            end
+        end
+        
+        
+        if node[:name].include? "testing"
+            nodeconfig.vm.synced_folder './tests', '/var/onap_tests/', create: true
+            test_suite = ENV.fetch('TEST_SUITE', '*')
+            test_case = ENV.fetch('TEST_CASE', '*')
+            # Override variables
+            run_path = 'vagrant_utils/unit_testing.sh'
+            node[:args] = [test_suite, test_case]
+        end
+
+
+        if node[:name].include? "vfc"
+          nodeconfig.vm.provision 'docker'
+        end
+
+        nodeconfig.vm.provision 'shell' do |s|
+          s.path = run_path
+          s.args = node[:args]
+          s.env  = configuration
+        end
+
+      end #nodeconfig
+    end #node
+end #config
   
-    config.vm.define :robot do |robot|
-      robot.vm.hostname = 'robot'
-      robot.vm.network :private_network, ip: '192.168.50.8'
-      robot.vm.provider "openstack" do |v|
-        v.server_name = 'robot'
-      end
-      robot.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['robot']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :vid do |vid|
-      vid.vm.hostname = 'vid'
-      vid.vm.network :private_network, ip: '192.168.50.9'
-      vid.vm.provider "openstack" do |v|
-        v.server_name = 'vid'
-      end
-      vid.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['vid']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :sdnc do |sdnc|
-      sdnc.vm.hostname = 'sdnc'
-      sdnc.vm.network :private_network, ip: '192.168.50.10'
-      sdnc.vm.provider "openstack" do |v|
-        v.server_name = 'sdnc'
-      end
-      sdnc.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['sdnc']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :portal do |portal|
-      portal.vm.hostname = 'portal'
-      portal.vm.network :private_network, ip: '192.168.50.11'
-      portal.vm.provider "openstack" do |v|
-        v.server_name = 'portal'
-      end
-      portal.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['portal']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :dcae do |dcae|
-      dcae.vm.hostname = 'dcae'
-      dcae.vm.network :private_network, ip: '192.168.50.12'
-      dcae.vm.provider "openstack" do |v|
-        v.server_name = 'dcae'
-      end
-      dcae.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['dcae']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :policy do |policy|
-      policy.vm.hostname = 'policy'
-      policy.vm.network :private_network, ip: '192.168.50.13'
-      policy.vm.provider "openstack" do |v|
-        v.server_name = 'policy'
-      end
-      policy.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['policy']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :appc do |appc|
-      appc.vm.hostname = 'appc'
-      appc.vm.network :private_network, ip: '192.168.50.14'
-      appc.vm.provider "openstack" do |v|
-        v.server_name = 'appc'
-      end
-      appc.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['appc']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :vfc do |vfc|
-      vfc.vm.hostname = 'vfc'
-      vfc.vm.network :private_network, ip: '192.168.50.15'
-      vfc.vm.provider "openstack" do |v|
-        v.server_name = 'vfc'
-      end
-      vfc.vm.provision 'docker'
-      vfc.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['vfc']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :multicloud do |multicloud|
-      multicloud.vm.hostname = 'multicloud'
-      multicloud.vm.network :private_network, ip: '192.168.50.16'
-      multicloud.vm.provider "openstack" do |v|
-        v.server_name = 'multicloud'
-      end
-      multicloud.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['multicloud']
-        s.env = conf
-      end
-    end
-
-    config.vm.define :ccsdk do |ccsdk|
-      ccsdk.vm.hostname = 'ccsdk'
-      ccsdk.vm.network :private_network, ip: '192.168.50.17'
-      ccsdk.vm.provider "openstack" do |v|
-        v.server_name = 'ccsdk'
-      end
-      ccsdk.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/postinstall.sh'
-        s.args = ['ccsdk']
-        s.env = conf
-      end
-    end
-
-  when 'testing'
-
-    config.vm.define :testing do |testing|
-      test_suite = ENV.fetch('TEST_SUITE', '*')
-      test_case = ENV.fetch('TEST_CASE', '*')
-
-      testing.vm.hostname = 'testing'
-      testing.vm.network :private_network, ip: '192.168.50.3'
-      testing.vm.synced_folder './tests', '/var/onap_tests/', create: true
-      testing.vm.provider "virtualbox" do |v|
-        v.customize ["modifyvm", :id, "--memory", 4 * 1024]
-      end
-      testing.vm.provider "libvirt" do |v|
-        v.memory = 4 * 1024
-        v.nested = true
-      end
-      testing.vm.provider "openstack" do |v|
-        v.server_name = 'testing'
-        v.flavor      = 'm1.small'
-      end
-      testing.vm.provision 'shell' do |s|
-        s.path = 'vagrant_utils/unit_testing.sh'
-        s.args = [test_suite, test_case]
-        s.env = conf
-      end
-    end
-
-  end
-end
diff --git a/test/csit/plans/appc/healthcheck/bundle_query.sh b/test/csit/plans/appc/healthcheck/bundle_query.sh
index 7224ae9..f163ce5 100755
--- a/test/csit/plans/appc/healthcheck/bundle_query.sh
+++ b/test/csit/plans/appc/healthcheck/bundle_query.sh
@@ -24,8 +24,8 @@
 
 echo "There are $num_failed_bundles failed bundles out of $num_bundles installed bundles."
 
-if [ "$num_failed_bundles" -ge 1 ] || [ $num_bundles -le 393 ]; then
-  echo "There are $num_bundles bundles out of 394 with $num_failed_bundles in a failed state. "
+if [ "$num_failed_bundles" -ge 1 ] || [ $num_bundles -le 400 ]; then
+  echo "There are $num_bundles bundles with $num_failed_bundles in a failed state. "
   echo "The following bundle(s) are in a failed state: "
   echo "  $failed_bundles"
   exit 1;
diff --git a/test/csit/plans/appc/healthcheck/db_query.sh b/test/csit/plans/appc/healthcheck/db_query.sh
index 87e0ac3..70829a1 100755
--- a/test/csit/plans/appc/healthcheck/db_query.sh
+++ b/test/csit/plans/appc/healthcheck/db_query.sh
@@ -41,20 +41,20 @@
   exit 1;
 fi
 
-if [ "$NODE_TYPES" -eq "0" ]; then
-  echo "There is no data in table NODE_TYPES. "
-  exit 1;
-fi
+#if [ "$NODE_TYPES" -eq "0" ]; then
+#  echo "There is no data in table NODE_TYPES. "
+#  exit 1;
+#fi
 
-if [ "$SVC_LOGIC" -eq "0" ] ; then
-  echo "There is no data in table SVC_LOGIC. "
-  exit 1;
-fi
+#if [ "$SVC_LOGIC" -eq "0" ] ; then
+#  echo "There is no data in table SVC_LOGIC. "
+#  exit 1;
+#fi
 
-if [ "$VNF_DG_MAPPING" -eq "0" ]; then
-  echo "There is no data in table VNF_DG_MAPPING. "
-  exit 1;
-fi 
+#if [ "$VNF_DG_MAPPING" -eq "0" ]; then
+#  echo "There is no data in table VNF_DG_MAPPING. "
+#  exit 1;
+#fi 
 
 echo "Expected table data is present."
 exit 0 )
diff --git a/test/csit/plans/appc/healthcheck/setup.sh b/test/csit/plans/appc/healthcheck/setup.sh
index 3c57cef..eaf488a 100755
--- a/test/csit/plans/appc/healthcheck/setup.sh
+++ b/test/csit/plans/appc/healthcheck/setup.sh
@@ -20,6 +20,12 @@
 SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 source ${WORKSPACE}/test/csit/scripts/appc/script1.sh
 
+export NEXUS_USERNAME=docker
+export NEXUS_PASSWD=docker
+export NEXUS_DOCKER_REPO=nexus3.onap.org:10001
+export DMAAP_TOPIC=AUTO
+export DOCKER_IMAGE_VERSION=1.1.0-SNAPSHOT-latest
+
 export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' | sort -n | head -1)
 
 if [ "$MTU" == "" ]; then
@@ -36,14 +42,14 @@
 unset http_proxy https_proxy
 cd $WORKSPACE/archives/appc/docker-compose
 
-sed -i "s/DMAAP_TOPIC_ENV=.*/DMAAP_TOPIC_ENV="AUTO"/g" docker-compose.yml
-docker login -u docker -p docker nexus3.onap.org:10001
+sed -i "s/DMAAP_TOPIC_ENV=.*/DMAAP_TOPIC_ENV="$DMAAP_TOPIC"/g" docker-compose.yml
+docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
 
-docker pull nexus3.onap.org:10001/openecomp/appc-image:1.1-STAGING-latest
-docker tag nexus3.onap.org:10001/openecomp/appc-image:1.1-STAGING-latest openecomp/appc-image:latest
+docker pull $NEXUS_DOCKER_REPO/openecomp/appc-image:$DOCKER_IMAGE_VERSION
+docker tag $NEXUS_DOCKER_REPO/openecomp/appc-image:$DOCKER_IMAGE_VERSION openecomp/appc-image:latest
 
-docker pull nexus3.onap.org:10001/openecomp/dgbuilder-sdnc-image:1.1-STAGING-latest
-docker tag nexus3.onap.org:10001/openecomp/dgbuilder-sdnc-image:1.1-STAGING-latest openecomp/dgbuilder-sdnc-image:latest
+docker pull $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:latest
+docker tag $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:latest onap/ccsdk-dgbuilder-image:latest
 
 # start APPC containers with docker compose and configuration from docker-compose.yml
 docker-compose up -d
@@ -71,7 +77,7 @@
 
 #sleep 800
 
-TIME_OUT=1500
+TIME_OUT=1000
 INTERVAL=60
 TIME=0
 while [ "$TIME" -lt "$TIME_OUT" ]; do
@@ -79,7 +85,7 @@
 response=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf system:start-level)
 num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1)
 
-  if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 394 ]; then
+  if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 400 ]; then
     echo APPC karaf started in $TIME seconds
     break;
   fi
@@ -96,7 +102,7 @@
 response=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf system:start-level)
 num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1)
 
-  if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 394 ]; then
+  if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 400 ]; then
     num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1)
     num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure | wc -l)
     failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure)
@@ -111,3 +117,9 @@
 # Pass any variables required by Robot test suites in ROBOT_VARIABLES
 ROBOT_VARIABLES="-v SCRIPTS:${SCRIPTS}"
 
+if [ "$response" == "" ] || [ "$num_bundles" == "" ]; then
+  echo "Docker container appc_controller_container is not available. Exiting."
+  exit 1
+fi
+
+
diff --git a/test/csit/plans/appc/healthcheck/testplan.txt b/test/csit/plans/appc/healthcheck/testplan.txt
index 2a8c1ea..fbf2319 100644
--- a/test/csit/plans/appc/healthcheck/testplan.txt
+++ b/test/csit/plans/appc/healthcheck/testplan.txt
@@ -1,4 +1,5 @@
 # Test suites are relative paths under [integration.git]/test/csit/tests/.
 # Place the suites in run order.
 appc/healthcheck
+#appc/testsuite
 
diff --git a/test/csit/plans/sdc/healthCheck/setup.sh b/test/csit/plans/sdc/healthCheck/setup.sh
new file mode 100644
index 0000000..f247be6
--- /dev/null
+++ b/test/csit/plans/sdc/healthCheck/setup.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+# Place the scripts in run order:
+
+
+source ${WORKSPACE}/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh
+
+source ${WORKSPACE}/test/csit/scripts/sdc/start_sdc_containers.sh
+
+
+BE_IP=`get-instance-ip.sh sdc-BE`
+echo BE_IP=${BE_IP}
+
+
+# Pass any variables required by Robot test suites in ROBOT_VARIABLES
+ROBOT_VARIABLES="-v BE_IP:${BE_IP}"
+
diff --git a/test/csit/plans/sdc/healthCheck/teardown.sh b/test/csit/plans/sdc/healthCheck/teardown.sh
new file mode 100644
index 0000000..a5f6981
--- /dev/null
+++ b/test/csit/plans/sdc/healthCheck/teardown.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+
+source ${WORKSPACE}/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh
+
+# $WORKSPACE/archives/clamp-clone deleted with archives folder when tests starts so we keep it at the end for debugging
diff --git a/test/csit/plans/sdc/healthCheck/testplan.txt b/test/csit/plans/sdc/healthCheck/testplan.txt
new file mode 100644
index 0000000..2b2db1e
--- /dev/null
+++ b/test/csit/plans/sdc/healthCheck/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+sdc/healthCheck
diff --git a/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh b/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh
new file mode 100644
index 0000000..da421e4
--- /dev/null
+++ b/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP SDC
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+#                             reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh"
+
+# Clone sdc environment template
+mkdir -p ${WORKSPACE}/data/environments/
+mkdir -p ${WORKSPACE}/data/clone/
+
+cd ${WORKSPACE}/data/clone
+git clone --depth 1 http://gerrit.onap.org/r/sdc -b master
+
+
+# set environment variables
+
+ENV_NAME=CSIT
+MR_IP_ADDR=10.0.0.1
+
+if [ -e /opt/config/public_ip.txt ]
+  then
+    IP_ADDRESS=$(cat /opt/config/public_ip.txt)
+   else
+    IP_ADDRESS=$(ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d' ' -f3 | cut -d':' -f2)
+   fi
+   
+  cat ${WORKSPACE}/data/clone/sdc/sdc-os-chef/environments/Template.json | sed "s/yyy/"$IP_ADDRESS"/g" > ${WORKSPACE}/data/environments/$ENV_NAME.json
+  sed -i "s/xxx/"$ENV_NAME"/g" ${WORKSPACE}/data/environments/$ENV_NAME.json
+  sed -i "s/\"ueb_url_list\":.*/\"ueb_url_list\": \""$MR_IP_ADDR","$MR_IP_ADDR"\",/g" ${WORKSPACE}/data/environments/$ENV_NAME.json
+  sed -i "s/\"fqdn\":.*/\"fqdn\": [\""$MR_IP_ADDR"\", \""$MR_IP_ADDR"\"]/g" ${WORKSPACE}/data/environments/$ENV_NAME.json
+
+
diff --git a/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh b/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh
new file mode 100644
index 0000000..e032842
--- /dev/null
+++ b/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/sdc/kill_and_remove_dataFolder.sh"
+
+#kill and remove all sdc dockers
+docker stop $(docker ps -a -q --filter="name=sdc")
+docker rm $(docker ps -a -q --filter="name=sdc")
+
+
+#delete data folder
+
+rm -rf ${WORKSPACE}/data/*
+
+
diff --git a/test/csit/scripts/sdc/start_sdc_containers.sh b/test/csit/scripts/sdc/start_sdc_containers.sh
new file mode 100644
index 0000000..31105ac
--- /dev/null
+++ b/test/csit/scripts/sdc/start_sdc_containers.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP SDC
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+#                             reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/sdc/start_sdc_containers.sh"
+
+
+RELEASE=latest
+LOCAL=false
+SKIPTESTS=false
+DEP_ENV=CSIT
+#[ -f /opt/config/nexus_username.txt ] && NEXUS_USERNAME=$(cat /opt/config/nexus_username.txt)    || NEXUS_USERNAME=release
+#[ -f /opt/config/nexus_password.txt ] && NEXUS_PASSWD=$(cat /opt/config/nexus_password.txt)      || NEXUS_PASSWD=sfWU3DFVdBr7GVxB85mTYgAW
+#[ -f /opt/config/nexus_docker_repo.txt ] && NEXUS_DOCKER_REPO=$(cat /opt/config/nexus_docker_repo.txt) || NEXUS_DOCKER_REPO=ecomp-nexus:${PORT}
+#[ -f /opt/config/nexus_username.txt ] && docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
+export IP=`ifconfig eth0 | awk -F: '/inet addr/ {gsub(/ .*/,"",$2); print $2}'`
+#export PREFIX=${NEXUS_DOCKER_REPO}'/openecomp'
+export PREFIX='nexus3.onap.org:10001/openecomp'
+
+#start Elastic-Search
+docker run --detach --name sdc-es --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --memory 1g --memory-swap=1g --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro -e ES_HEAP_SIZE=1024M --volume ${WORKSPACE}/data/ES:/usr/share/elasticsearch/data --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9200:9200 --publish 9300:9300 ${PREFIX}/sdc-elasticsearch:${RELEASE}
+
+#start cassandra
+docker run --detach --name sdc-cs --env RELEASE="${RELEASE}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9042:9042 --publish 9160:9160 ${PREFIX}/sdc-cassandra:${RELEASE}
+
+echo "please wait while CS is starting..."
+echo ""
+c=120 # seconds to wait
+REWRITE="\e[25D\e[1A\e[K"
+while [ $c -gt 0 ]; do
+    c=$((c-1))
+    sleep 1
+    echo -e "${REWRITE}$c"
+done
+echo -e ""
+
+
+#start kibana
+docker run --detach --name sdc-kbn --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 2g --memory-swap=2g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 5601:5601 ${PREFIX}/sdc-kibana:${RELEASE}
+
+#start sdc-backend
+docker run --detach --name sdc-BE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env http_proxy=${http_proxy} --env https_proxy=${https_proxy} --env no_proxy=${no_proxy} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 4g --memory-swap=4g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/logs/BE/:/var/lib/jetty/logs  --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 8443:8443 --publish 8080:8080 ${PREFIX}/sdc-backend:${RELEASE}
+
+echo "please wait while BE is starting..."
+echo ""
+c=120 # seconds to wait
+REWRITE="\e[45D\e[1A\e[K"
+while [ $c -gt 0 ]; do
+    c=$((c-1))
+    sleep 1
+    echo -e "${REWRITE}$c"
+done
+echo -e ""
+
+#start Front-End
+docker run --detach --name sdc-FE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env http_proxy=${http_proxy} --env https_proxy=${https_proxy} --env no_proxy=${no_proxy} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 2g --memory-swap=2g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro  --volume ${WORKSPACE}/data/logs/FE/:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9443:9443 --publish 8181:8181 ${PREFIX}/sdc-frontend:${RELEASE}
+
+echo "please wait while FE is starting..."
+echo ""
+c=120 # seconds to wait
+REWRITE="\e[45D\e[1A\e[K"
+while [ $c -gt 0 ]; do
+    c=$((c-1))
+    sleep 1
+    echo -e "${REWRITE}$c"
+done
+echo -e ""
+
+
+
+
+#TIME=0
+#while [ "$TIME" -lt "$TIME_OUT" ]; do
+#  response=$(curl --write-out '%{http_code}' --silent --output /dev/null http://localhost:8080/restservices/clds/v1/clds/healthcheck); echo $response
+
+#  if [ "$response" == "200" ]; then
+#    echo Clamp and its database well started in $TIME seconds
+#    break;
+#  fi
+
+#  echo Sleep: $INTERVAL seconds before testing if Clamp is up. Total wait time up now is: $TIME seconds. Timeout is: $TIME_OUT seconds
+#  sleep $INTERVAL
+# TIME=$(($TIME+$INTERVAL))
+#done
+
+#if [ "$TIME" -ge "$TIME_OUT" ]; then
+#   echo TIME OUT: Docker containers not started in $TIME_OUT seconds... Could cause problems for tests...
+#fi
diff --git a/test/csit/tests/sdc/healthCheck/__init__.robot b/test/csit/tests/sdc/healthCheck/__init__.robot
new file mode 100644
index 0000000..8ee10d5
--- /dev/null
+++ b/test/csit/tests/sdc/healthCheck/__init__.robot
@@ -0,0 +1,2 @@
+*** Settings ***
+Documentation    Sdc - HealthCheck
diff --git a/test/csit/tests/sdc/healthCheck/test1.robot b/test/csit/tests/sdc/healthCheck/test1.robot
new file mode 100644
index 0000000..6d4dc24
--- /dev/null
+++ b/test/csit/tests/sdc/healthCheck/test1.robot
@@ -0,0 +1,16 @@
+*** Settings ***
+Library           Collections
+Library           OperatingSystem
+Library           RequestsLibrary
+Library           json
+
+*** Test Cases ***
+Get Requests health check ok
+    [Tags]    get
+    CreateSession    sdc-be    http://localhost:8080
+    ${headers}=    Create Dictionary    Accept=application/json    Content-Type=application/json
+    ${resp}=    Get Request    sdc-be    /sdc2/rest/healthCheck    headers=&{headers}
+    Should Be Equal As Strings    ${resp.status_code}    200
+    @{ITEMS}=    Copy List    ${resp.json()['componentsInfo']}
+    : FOR    ${ELEMENT}    IN    @{ITEMS}
+    \    Log    ${ELEMENT['healthCheckComponent']} ${ELEMENT['healthCheckStatus']}
diff --git a/test/csit/tests/so/sanity-check/sanity_test_so.robot b/test/csit/tests/so/sanity-check/sanity_test_so.robot
index 2e05c50..73a9f3f 100644
--- a/test/csit/tests/so/sanity-check/sanity_test_so.robot
+++ b/test/csit/tests/so/sanity-check/sanity_test_so.robot
@@ -13,59 +13,59 @@
     Create Session   refrepo  http://${REPO_IP}:8080
     ${data}=    Get Binary File     ${CURDIR}${/}data${/}createService.json
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA==    Content-Type=application/json    Accept=application/json
-    ${resp}=    Post Request    refrepo    /ecomp/mso/infra/serviceInstances/v2    data=${data}    headers=${headers}
+    ${resp}=    Post Request    refrepo    /ecomp/mso/infra/serviceInstances/v3    data=${data}    headers=${headers}
     Run Keyword If  '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405'  log to console  \nexecuted with expected result 
 	
 Create ServiceInstance for invalid user
     Create Session   refrepo  http://${REPO_IP}:8080
     ${data}=    Get Binary File     ${CURDIR}${/}data${/}createService.json
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQxOnBhc3N3b3JkMTI=    Content-Type=application/json    Accept=application/json
-    ${resp}=    Post Request    refrepo    /ecomp/mso/infra/serviceInstances/v2    data=${data}    headers=${headers}
+    ${resp}=    Post Request    refrepo    /ecomp/mso/infra/serviceInstances/v3    data=${data}    headers=${headers}
     Run Keyword If  '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405'  log to console  \nexecuted with expected result	
 
 Delete ServiceInstance for invalid input
     Create Session   refrepo  http://${REPO_IP}:8080
     ${data}=    Get Binary File     ${CURDIR}${/}data${/}deleteService.json
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA==    Content-Type=application/json    Accept=application/json
-    ${resp}=    Delete Request    refrepo    /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000    data=${data}    headers=${headers}
+    ${resp}=    Delete Request    refrepo    /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000    data=${data}    headers=${headers}
     Run Keyword If  '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405'  log to console  \nexecuted with expected result    
 	
 Delete ServiceInstance for invalid user
     Create Session   refrepo  http://${REPO_IP}:8080
     ${data}=    Get Binary File     ${CURDIR}${/}data${/}deleteService.json
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQxOnBhc3N3b3JkMTI==    Content-Type=application/json    Accept=application/json
-    ${resp}=    Delete Request    refrepo    /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000    data=${data}    headers=${headers}
+    ${resp}=    Delete Request    refrepo    /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000    data=${data}    headers=${headers}
     Run Keyword If  '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405'  log to console  \nexecuted with expected result
 	
 SO ServiceInstance health check
     Create Session   refrepo  http://${REPO_IP}:8080
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA==    Content-Type=application/json    Accept=application/json
-    ${resp}=    Get Request    refrepo    /ecomp/mso/infra/orchestrationRequests/v2/rq1234d1-5a33-55df-13ab-12abad84e333    headers=${headers}
+    ${resp}=    Get Request    refrepo    /ecomp/mso/infra/orchestrationRequests/v3/rq1234d1-5a33-55df-13ab-12abad84e333    headers=${headers}
     Should Not Contain     ${resp.content}      null
 
 Create VnfInstance for invalid input
     Create Session   refrepo  http://${REPO_IP}:8080
     ${data}=    Get Binary File     ${CURDIR}${/}data${/}createVnf.json
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA==    Content-Type=application/json    Accept=application/json
-    ${resp}=    Post Request    refrepo    /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs    data=${data}    headers=${headers}
+    ${resp}=    Post Request    refrepo    /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs    data=${data}    headers=${headers}
     Run Keyword If  '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405'  log to console  \nexecuted with expected result
 	
 Create VnfInstance for invalid credential
     Create Session   refrepo  http://${REPO_IP}:8080
     ${data}=    Get Binary File     ${CURDIR}${/}data${/}createVnf.json
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQxOnBhc3N3b3JkMTI=    Content-Type=application/json    Accept=application/json
-    ${resp}=    Post Request    refrepo    /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs    data=${data}    headers=${headers}
+    ${resp}=    Post Request    refrepo    /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs    data=${data}    headers=${headers}
     Run Keyword If  '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405'  log to console  \nexecuted with expected result    
 	
 Delete VnfInstance for invalid input
     Create Session   refrepo  http://${REPO_IP}:8080
     ${data}=    Get Binary File     ${CURDIR}${/}data${/}deleteVnf.json
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA==    Content-Type=application/json    Accept=application/json
-    ${resp}=    Delete Request    refrepo    /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs/aca51b0a-710d-4155-bc7c-7cef19d9a94e    data=${data}    headers=${headers}
+    ${resp}=    Delete Request    refrepo    /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs/aca51b0a-710d-4155-bc7c-7cef19d9a94e    data=${data}    headers=${headers}
     Run Keyword If  '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405'  log to console  \nexecuted with expected result
 	
 Get Orchestration Requests
     Create Session   refrepo  http://${REPO_IP}:8080
     &{headers}=  Create Dictionary    Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA==    Content-Type=application/json    Accept=application/json
-    ${resp}=    Get Request    refrepo    /ecomp/mso/infra/orchestrationRequests/v2    headers=${headers}
+    ${resp}=    Get Request    refrepo    /ecomp/mso/infra/orchestrationRequests/v3    headers=${headers}
     Should Not Contain     ${resp.content}      null
\ No newline at end of file
diff --git a/test/csit/tests/vnfsdk-marketplace/provision/sanity_test_vnfsdktestfunction.robot b/test/csit/tests/vnfsdk-marketplace/provision/sanity_test_vnfsdktestfunction.robot
index b06d5b5..c57642a 100644
--- a/test/csit/tests/vnfsdk-marketplace/provision/sanity_test_vnfsdktestfunction.robot
+++ b/test/csit/tests/vnfsdk-marketplace/provision/sanity_test_vnfsdktestfunction.robot
@@ -25,6 +25,15 @@
     Create Session   refrepo  http://${REPO_IP}:8702
     &{headers}=  Create Dictionary      Content-Type=application/json
     ${resp}=    Get Request    refrepo   /openoapi/vnfsdk-marketplace/v1/PackageResource/csars/${csarId}   headers=${headers}
+    ${response_json}    json.loads    ${resp.content}
+    ${downloadUri}=    Convert To String      ${response_json['downloadUri']}
+    Should Contain    ${downloadUri}     ${csarId}
+    Should Be Equal As Strings  ${resp.status_code}     200
+
+Get List Of Requests 
+    Create Session   refrepo  http://${REPO_IP}:8702
+    &{headers}=  Create Dictionary      Content-Type=application/json
+    ${resp}=    Get Request    refrepo   /openoapi/vnfsdk-marketplace/v1/PackageResource/csars?name=enterprise2DC&version=1.0&type=SSAR&provider=huawei   headers=${headers}	
     Should Be Equal As Strings  ${resp.status_code}     200
 
 Download VNF Package from Repository
@@ -32,9 +41,15 @@
     &{headers}=  Create Dictionary      Content-Type=application/json
     ${resp}=    Get Request    refrepo   /openoapi/vnfsdk-marketplace/v1/PackageResource/csars/${csarId}/files   headers=${headers}
     Should Be Equal As Strings  ${resp.status_code}     200
+    ${downloadUri}=    Convert To String    ${resp.content}
+    ${downloadUri1}=    Run    curl http://${REPO_IP}:8702/openoapi/vnfsdk-marketplace/v1/PackageResource/csars/${csarId}/files
+    ${string}=    Convert To String    ${downloadUri1}
+    Should Contain    ${downloadUri1}    '  % Total    % Received % Xferd  Average
+    Should Contain    ${string}    '  % Total    % Received % Xferd  Average
 
 Delete VNF Package from Repository
     Create Session   refrepo  http://${REPO_IP}:8702
     &{headers}=  Create Dictionary      Content-Type=application/json
     ${resp}=    Delete Request    refrepo    /openoapi/vnfsdk-marketplace/v1/PackageResource/csars/${csarId}   headers=${headers}
     Should Be Equal As Strings  ${resp.status_code}     200
+
diff --git a/test/mock/pom.xml b/test/mock/pom.xml
new file mode 100644
index 0000000..56aad78
--- /dev/null
+++ b/test/mock/pom.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.onap.integration</groupId>
+    <artifactId>mock</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <name>mock</name>
+    <description>ONAP emulator project based on Spring Boot</description>
+
+<parent>
+    <groupId>org.springframework.boot</groupId>
+    <artifactId>spring-boot-starter-parent</artifactId>
+    <version>1.5.7.RELEASE</version>
+    <relativePath/> <!-- lookup parent from repository -->
+</parent>
+
+<properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+    <java.version>1.8</java.version>
+    <versions.jackson>2.8.9</versions.jackson>
+    <jetty.version>9.2.22.v20170606</jetty.version>
+</properties>
+
+<dependencies>
+    <dependency>
+        <groupId>org.springframework.boot</groupId>
+        <artifactId>spring-boot-starter</artifactId>
+    </dependency>
+
+    <dependency>
+        <groupId>org.springframework.boot</groupId>
+        <artifactId>spring-boot-starter-test</artifactId>
+        <scope>test</scope>
+    </dependency>
+    <dependency>
+        <groupId>org.springframework.cloud</groupId>
+        <artifactId>spring-cloud-contract-wiremock</artifactId>
+        <version>1.1.3.RELEASE</version>
+        <scope>compile</scope>
+    </dependency>
+    <dependency>
+        <groupId>net.sf.jopt-simple</groupId>
+        <artifactId>jopt-simple</artifactId>
+        <version>5.0.3</version>
+    </dependency>
+    <dependency>
+        <groupId>org.eclipse.jetty</groupId>
+        <artifactId>jetty-server</artifactId>
+        <version>9.2.22.v20170606</version>
+    </dependency>
+    <dependency>
+        <groupId>org.eclipse.jetty</groupId>
+        <artifactId>jetty-servlet</artifactId>
+        <version>9.2.22.v20170606</version>
+    </dependency>
+    <dependency>
+        <groupId>org.eclipse.jetty</groupId>
+        <artifactId>jetty-servlets</artifactId>
+        <version>9.2.22.v20170606</version>
+    </dependency>
+    <dependency>
+        <groupId>org.eclipse.jetty</groupId>
+        <artifactId>jetty-webapp</artifactId>
+        <version>9.2.22.v20170606</version>
+    </dependency>
+    <dependency>
+        <groupId>com.google.guava</groupId>
+        <artifactId>guava</artifactId>
+        <version>20.0</version>
+    </dependency>
+    <dependency>
+        <groupId>com.fasterxml.jackson.core</groupId>
+        <artifactId>jackson-core</artifactId>
+        <version>${versions.jackson}</version>
+    </dependency>
+    <dependency>
+        <groupId>com.fasterxml.jackson.core</groupId>
+        <artifactId>jackson-annotations</artifactId>
+        <version>${versions.jackson}</version>
+    </dependency>
+    <dependency>
+        <groupId>com.fasterxml.jackson.core</groupId>
+        <artifactId>jackson-databind</artifactId>
+        <version>${versions.jackson}</version>
+    </dependency>
+    <dependency>
+        <groupId>org.apache.httpcomponents</groupId>
+        <artifactId>httpclient</artifactId>
+    </dependency>
+    <dependency>
+        <groupId>org.xmlunit</groupId>
+        <artifactId>xmlunit-core</artifactId>
+        <version>2.3.0</version>
+    </dependency>
+    <dependency>
+        <groupId>org.xmlunit</groupId>
+        <artifactId>xmlunit-legacy</artifactId>
+        <version>2.3.0</version>
+    </dependency>
+    <dependency>
+        <groupId>com.jayway.jsonpath</groupId>
+        <artifactId>json-path</artifactId>
+        <version>2.4.0</version>
+    </dependency>
+    <dependency>
+        <groupId>org.slf4j</groupId>
+        <artifactId>slf4j-api</artifactId>
+        <version>1.7.12</version>
+    </dependency>
+    <dependency>
+        <groupId>org.apache.commons</groupId>
+        <artifactId>commons-lang3</artifactId>
+        <version>3.6</version>
+    </dependency>
+    <dependency>
+        <groupId>com.flipkart.zjsonpatch</groupId>
+        <artifactId>zjsonpatch</artifactId>
+        <version>0.3.0</version>
+    </dependency>
+    <dependency>
+        <groupId>com.github.jknack</groupId>
+        <artifactId>handlebars</artifactId>
+        <version>4.0.6</version>
+    </dependency>
+</dependencies>
+
+<build>
+    <finalName>${project.artifactId}</finalName>
+    <plugins>
+        <plugin>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-maven-plugin</artifactId>
+        </plugin>
+        <plugin>
+            <artifactId>maven-dependency-plugin</artifactId>
+        </plugin>
+    </plugins>
+</build>
+</project>
diff --git a/test/mock/src/main/docker/Dockerfile b/test/mock/src/main/docker/Dockerfile
new file mode 100644
index 0000000..b1bf4d9
--- /dev/null
+++ b/test/mock/src/main/docker/Dockerfile
@@ -0,0 +1,19 @@
+FROM openjdk:8-jre
+
+MAINTAINER Geora Barsky <georab@amdocs.com>
+
+RUN mkdir -p /var/wiremock/lib/ 
+
+ADD mock.jar /var/wiremock/lib/app.jar
+
+WORKDIR /home/wiremock
+
+COPY docker-entrypoint.sh /
+RUN chmod 700 /docker-entrypoint.sh
+
+VOLUME /home/wiremock
+EXPOSE 8080 8081 9999
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+CMD ["java", "-jar","/var/wiremock/lib/app.jar"]
\ No newline at end of file
diff --git a/test/mock/src/main/docker/docker-entrypoint.sh b/test/mock/src/main/docker/docker-entrypoint.sh
new file mode 100644
index 0000000..47364a2
--- /dev/null
+++ b/test/mock/src/main/docker/docker-entrypoint.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+touch /app.jar
+
+java -Xms1024m -Xmx1024m -jar /var/wiremock/lib/app.jar
\ No newline at end of file
diff --git a/test/mock/src/main/java/org/onap/integration/test/mock/MockApplication.java b/test/mock/src/main/java/org/onap/integration/test/mock/MockApplication.java
new file mode 100644
index 0000000..115cb25
--- /dev/null
+++ b/test/mock/src/main/java/org/onap/integration/test/mock/MockApplication.java
@@ -0,0 +1,122 @@
+package org.onap.integration.test.mock;
+
+import static com.github.tomakehurst.wiremock.client.ResponseDefinitionBuilder.responseDefinition;
+import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl;
+import static com.github.tomakehurst.wiremock.core.WireMockApp.FILES_ROOT;
+import static com.github.tomakehurst.wiremock.core.WireMockApp.MAPPINGS_ROOT;
+import static com.github.tomakehurst.wiremock.http.RequestMethod.ANY;
+import static com.github.tomakehurst.wiremock.matching.RequestPatternBuilder.newRequestPattern;
+import static java.lang.System.out;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+
+import com.github.tomakehurst.wiremock.WireMockServer;
+import com.github.tomakehurst.wiremock.common.ConsoleNotifier;
+import com.github.tomakehurst.wiremock.common.FatalStartupException;
+import com.github.tomakehurst.wiremock.common.FileSource;
+import com.github.tomakehurst.wiremock.core.WireMockConfiguration;
+import com.github.tomakehurst.wiremock.http.ResponseDefinition;
+import com.github.tomakehurst.wiremock.matching.RequestPattern;
+import com.github.tomakehurst.wiremock.standalone.MappingsLoader;
+import com.github.tomakehurst.wiremock.stubbing.StubMapping;
+import com.github.tomakehurst.wiremock.stubbing.StubMappings;
+
+@SpringBootApplication
+public class MockApplication {
+
+    
+	private static final String BANNER= " \n" +
+"          ********                                      ****     ****                        ##        \n" +
+"         **######**                                     ###*     *###                        ##        \n" +
+"        *##******##*                                    ##***   ***##                        ##\n" +
+"	    **#*      *#**                                   ##*#*   *#*##                        ##        \n" +
+"	    *#*        *#*  ##******   *******   ##******    ##*#*   *#*##    *******    ******   ##    *** \n" +
+"	    *#*        *#*  ##*####*  *######*   ##*####**   ##*#*   *#*##   **#####**  **####**  ##   *#** \n" +
+"	    *#*        *#*  ##****#*  *#****#*   ##** **#*   ## *** *** ##   *#** **#*  *#****#*  ## **#** \n" +
+"	    *#          #*  ##*  *#*        #*   ##*   *#*   ## *#* *#* ##   *#*   *#*  *#*  *#*  ##**#** \n" +
+"	    *#*        *#*  ##*   ##    ****##   ##*   *#*   ## *#* *#* ##   *#*   *#*  *#*       ##*##* \n" +	 
+"	    *#*        *#*  ##    ##  **######   ##     #*   ## *#* *#* ##   *#     #*  *#        ##**#** \n" +
+"	    *#*        *#*  ##    ##  *#****##   ##*   *#*   ##  *#*#*  ##   *#*   *#*  *#*       ##**##* \n" +
+"	    **#*      *#**  ##    ##  *#*  *##   ##*   *#*   ##  *#*#*  ##   *#*   *#*  *#*  *#*  ##  *#** \n" +
+"	     *##******##*   ##    ##  *#* **##*  ##** **#*   ##  *#*#*  ##   *#** **#*  *#****#*  ##  **#* \n" +
+"	      **######**    ##    ##  *#######*  ##*####*    ##  *###*  ##   **#####**  **####**  ##   *#** \n" +
+"	       ********     ##    ##  *******#*  ##******    ##   *#*   ##    *******    ******   ##    *#* \n" +
+"                                            ##  \n" +
+"                                            ##  \n" +
+"                                            ##  \n" +
+"                                            **  \n" ;
+					
+    static {
+        System.setProperty("org.mortbay.log.class", "com.github.tomakehurst.wiremock.jetty.LoggerAdapter");
+    }
+
+	private WireMockServer wireMockServer;
+	
+	public static void main(String[] args) {
+		SpringApplication.run(MockApplication.class, args);
+		//new WireMockServerRunner().run("--port 9999");
+		new MockApplication().run(args);
+	}
+	
+	public void run(String... args) {
+
+		WireMockConfiguration options = WireMockConfiguration.options();
+        options.port(9999);
+		FileSource fileSource = options.filesRoot();
+		fileSource.createIfNecessary();
+		FileSource filesFileSource = fileSource.child(FILES_ROOT);
+		filesFileSource.createIfNecessary();
+		FileSource mappingsFileSource = fileSource.child(MAPPINGS_ROOT);
+		mappingsFileSource.createIfNecessary();
+		
+		// Register extension
+		options.extensions("org.onap.integration.test.mock.extension.Webhooks");
+		// Register notifier
+        options.notifier(new ConsoleNotifier(true));   
+        wireMockServer = new WireMockServer(options);
+        
+        wireMockServer.enableRecordMappings(mappingsFileSource, filesFileSource);
+
+		//if (options.specifiesProxyUrl()) {
+		//	addProxyMapping(options.proxyUrl());
+		//}
+
+        try {
+            wireMockServer.start();
+            out.println(BANNER);
+            out.println();
+            out.println(options);
+        } catch (FatalStartupException e) {
+            System.err.println(e.getMessage());
+            System.exit(1);
+        }
+    }
+	
+	private void addProxyMapping(final String baseUrl) {
+		wireMockServer.loadMappingsUsing(new MappingsLoader() {
+			@Override
+			public void loadMappingsInto(StubMappings stubMappings) {
+                RequestPattern requestPattern = newRequestPattern(ANY, anyUrl()).build();
+				ResponseDefinition responseDef = responseDefinition()
+						.proxiedFrom(baseUrl)
+						.build();
+
+				StubMapping proxyBasedMapping = new StubMapping(requestPattern, responseDef);
+				proxyBasedMapping.setPriority(10); // Make it low priority so that existing stubs will take precedence
+				stubMappings.addMapping(proxyBasedMapping);
+			}
+		});
+	}
+	
+	public void stop() {
+		wireMockServer.stop();
+	}
+
+    public boolean isRunning() {
+        return wireMockServer.isRunning();
+    }
+
+    public int port() { return wireMockServer.port(); }	
+	
+}
diff --git a/test/mock/src/main/java/org/onap/integration/test/mock/extension/WebhookDefinition.java b/test/mock/src/main/java/org/onap/integration/test/mock/extension/WebhookDefinition.java
new file mode 100644
index 0000000..dff99fd
--- /dev/null
+++ b/test/mock/src/main/java/org/onap/integration/test/mock/extension/WebhookDefinition.java
@@ -0,0 +1,101 @@
+package org.onap.integration.test.mock.extension;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.github.tomakehurst.wiremock.http.Body;
+import com.github.tomakehurst.wiremock.http.HttpHeader;
+import com.github.tomakehurst.wiremock.http.HttpHeaders;
+import com.github.tomakehurst.wiremock.http.RequestMethod;
+
+import java.net.URI;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * JSON-deserializable description of an outbound webhook call: HTTP method,
+ * target URL, headers and optional body. Instances are created either by
+ * Jackson (from a stub mapping's "webhook" action parameters) or fluently in
+ * Java via the with* builder methods.
+ */
+public class WebhookDefinition {
+    
+    private RequestMethod method;
+    private URI url;
+    private List<HttpHeader> headers;
+    private Body body = Body.none();
+
+    /**
+     * Jackson creator. At most one of {@code body} / {@code base64Body} is
+     * expected; {@code Body.fromOneOf} uses whichever is non-null.
+     *
+     * @param method     HTTP method of the webhook request
+     * @param url        target URL
+     * @param headers    request headers; may be absent (null) in the JSON
+     * @param body       plain-text body, or null
+     * @param base64Body base64-encoded binary body, or null
+     */
+    @JsonCreator
+    public WebhookDefinition(@JsonProperty("method") RequestMethod method,
+                             @JsonProperty("url") URI url,
+                             @JsonProperty("headers") HttpHeaders headers,
+                             @JsonProperty("body") String body,
+                             @JsonProperty("base64Body") String base64Body) {
+        this.method = method;
+        this.url = url;
+        // Guard against JSON with no "headers" attribute: Jackson passes null
+        // for an absent creator property, and headers.all() would then NPE.
+        this.headers = headers != null ? newArrayList(headers.all()) : newArrayList();
+        this.body = Body.fromOneOf(null, body, null, base64Body);
+    }
+
+    public WebhookDefinition() {
+    }
+
+    public RequestMethod getMethod() {
+        return method;
+    }
+
+    public URI getUrl() {
+        return url;
+    }
+
+    /** Headers as WireMock HttpHeaders; empty (never null) when none were set. */
+    public HttpHeaders getHeaders() {
+        // The default constructor leaves 'headers' null; return an empty
+        // collection instead of propagating the null to callers.
+        return headers != null ? new HttpHeaders(headers) : new HttpHeaders();
+    }
+
+    /** Base64 form of the body, or null when the body is textual. */
+    public String getBase64Body() {
+        return body.isBinary() ? body.asBase64() : null;
+    }
+
+    /** Textual body, or null when the body is binary. */
+    public String getBody() {
+        return body.isBinary() ? null : body.asString();
+    }
+
+    /** Raw body bytes regardless of text/binary representation. */
+    @JsonIgnore
+    public byte[] getBinaryBody() {
+        return body.asBytes();
+    }
+
+    public WebhookDefinition withMethod(RequestMethod method) {
+        this.method = method;
+        return this;
+    }
+
+    public WebhookDefinition withUrl(URI url) {
+        this.url = url;
+        return this;
+    }
+
+    public WebhookDefinition withUrl(String url) {
+        withUrl(URI.create(url));
+        return this;
+    }
+
+    public WebhookDefinition withHeaders(List<HttpHeader> headers) {
+        this.headers = headers;
+        return this;
+    }
+
+    /** Appends a header, lazily creating the header list on first use. */
+    public WebhookDefinition withHeader(String key, String... values) {
+        if (headers == null) {
+            headers = newArrayList();
+        }
+
+        headers.add(new HttpHeader(key, values));
+        return this;
+    }
+
+    public WebhookDefinition withBody(String body) {
+        this.body = new Body(body);
+        return this;
+    }
+
+    public WebhookDefinition withBinaryBody(byte[] body) {
+        this.body = new Body(body);
+        return this;
+    }
+}
diff --git a/test/mock/src/main/java/org/onap/integration/test/mock/extension/Webhooks.java b/test/mock/src/main/java/org/onap/integration/test/mock/extension/Webhooks.java
new file mode 100644
index 0000000..cb17ba6
--- /dev/null
+++ b/test/mock/src/main/java/org/onap/integration/test/mock/extension/Webhooks.java
@@ -0,0 +1,100 @@
+package org.onap.integration.test.mock.extension;
+
+import com.github.tomakehurst.wiremock.common.Notifier;
+import com.github.tomakehurst.wiremock.core.Admin;
+import com.github.tomakehurst.wiremock.extension.Parameters;
+import com.github.tomakehurst.wiremock.extension.PostServeAction;
+import com.github.tomakehurst.wiremock.http.HttpClientFactory;
+import com.github.tomakehurst.wiremock.http.HttpHeader;
+import com.github.tomakehurst.wiremock.stubbing.ServeEvent;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.util.EntityUtils;
+
+import java.io.IOException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+
+import static com.github.tomakehurst.wiremock.common.Exceptions.throwUnchecked;
+import static com.github.tomakehurst.wiremock.common.LocalNotifier.notifier;
+import static com.github.tomakehurst.wiremock.http.HttpClientFactory.getHttpRequestFor;
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+/**
+ * WireMock post-serve action ("webhook") that fires an additional HTTP
+ * request after a stub has been served. The outbound request is described by
+ * a WebhookDefinition carried in the action's parameters and is executed
+ * asynchronously on a small scheduler pool so the stubbed response is never
+ * delayed by the webhook call.
+ */
+public class Webhooks extends PostServeAction {
+
+    private final ScheduledExecutorService scheduler;
+    private final HttpClient httpClient;
+
+    public Webhooks() {
+        // One pool of 10 threads shared by all webhook firings of this server.
+        scheduler = Executors.newScheduledThreadPool(10);
+        httpClient = HttpClientFactory.createClient();
+    }
+
+    /** Name by which stub mappings reference this extension. */
+    @Override
+    public String getName() {
+        return "webhook";
+    }
+
+    /**
+     * Schedules the webhook request with zero delay (i.e. as soon as a pool
+     * thread is free) and logs the outcome to both the WireMock notifier and
+     * stdout.
+     */
+    @Override
+    public void doAction(ServeEvent serveEvent, Admin admin, Parameters parameters) {
+        final WebhookDefinition definition = parameters.as(WebhookDefinition.class);
+        final Notifier notifier = notifier();
+
+        scheduler.schedule(
+            new Runnable() {
+                @Override
+                public void run() {
+                    HttpUriRequest request = buildRequest(definition);
+
+                    try {
+                        HttpResponse response = httpClient.execute(request);
+                        // Read the response entity exactly once: the entity is
+                        // backed by a single-read stream, so the original code's
+                        // second EntityUtils.toString() call on the same entity
+                        // could fail or return nothing. Format once, log twice.
+                        String message = String.format(
+                            "Webhook %s request to %s returned status %s\n\n%s",
+                            definition.getMethod(),
+                            definition.getUrl(),
+                            response.getStatusLine(),
+                            EntityUtils.toString(response.getEntity())
+                        );
+                        notifier.info(message);
+                        System.out.println(message);
+                    } catch (IOException e) {
+                        throwUnchecked(e);
+                    }
+                }
+            },
+            0L,
+            SECONDS
+        );
+    }
+
+    /** Translates a WebhookDefinition into an Apache HttpClient request. */
+    private static HttpUriRequest buildRequest(WebhookDefinition definition) {
+        HttpUriRequest request = getHttpRequestFor(
+                definition.getMethod(),
+                definition.getUrl().toString()
+        );
+
+        for (HttpHeader header: definition.getHeaders().all()) {
+            request.addHeader(header.key(), header.firstValue());
+        }
+
+        // Only entity-bearing methods (POST, PUT, ...) get a request body.
+        if (definition.getMethod().hasEntity()) {
+            HttpEntityEnclosingRequestBase entityRequest = (HttpEntityEnclosingRequestBase) request;
+            entityRequest.setEntity(new ByteArrayEntity(definition.getBinaryBody()));
+        }
+
+        return request;
+    }
+
+    /** Fluent entry point for building a WebhookDefinition in Java test code. */
+    public static WebhookDefinition webhook() {
+        return new WebhookDefinition();
+    }
+}
diff --git a/test/mock/src/main/resources/application.properties b/test/mock/src/main/resources/application.properties
new file mode 100644
index 0000000..51ad5eb
--- /dev/null
+++ b/test/mock/src/main/resources/application.properties
@@ -0,0 +1 @@
+server.port=9090
diff --git a/test/mock/src/test/java/org/onap/integration/test/mock/MockApplicationTests.java b/test/mock/src/test/java/org/onap/integration/test/mock/MockApplicationTests.java
new file mode 100644
index 0000000..8d2a046
--- /dev/null
+++ b/test/mock/src/test/java/org/onap/integration/test/mock/MockApplicationTests.java
@@ -0,0 +1,16 @@
+package org.onap.integration.test.mock;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.test.context.junit4.SpringRunner;
+
+// Smoke test: boots the full Spring application context and fails the build
+// if any bean wiring or configuration error prevents startup.
+@RunWith(SpringRunner.class)
+@SpringBootTest
+public class MockApplicationTests {
+
+	// Passing simply means the context loaded without errors; no further
+	// assertions are needed for this smoke check.
+	@Test
+	public void contextLoads() {
+	}
+
+}