Adding basic E2E test for workflow designer
Change-Id: I45c3fb5a8b26fdd75084c07a9f934b8800259c35
Signed-off-by: Ashish-uzumaki <1998ashishsingh@gmail.com>
Issue-ID: SDC-3193
diff --git a/plans/sdc-workflow-d/setup.sh b/plans/sdc-workflow-d/setup.sh
new file mode 100644
index 0000000..30f1c0f
--- /dev/null
+++ b/plans/sdc-workflow-d/setup.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# It can enable HTTPS for SDC component
+export SDC_TEST_HTTPS="${SDC_TEST_HTTPS:-false}"
+
+# By default all images are from remote upstream registry, this option
+# provides the chance to test locally built images
+export SDC_LOCAL_IMAGES="${SDC_LOCAL_IMAGES:-false}"
+
+# Same switch as SDC_LOCAL_IMAGES, but for the workflow designer images
+export WORKFLOW_LOCAL_IMAGES="${WORKFLOW_LOCAL_IMAGES:-false}"
+
+# For this to take effect SDC_LOCAL_IMAGES must be enabled...
+#
+# The path to the local sdc git repo from which the local images have
+# been built - it also affects the tag used - if left empty *AND*
+# local images are used *AND* SDC_LOCAL_TAG is unset then the tag
+# will be set to: 'latest'
+#
+# BEWARE: Using local images with an incorrect git repo could lead to
+# problems...set SDC_LOCAL_GITREPO or GERRIT_BRANCH properly...
+export SDC_LOCAL_GITREPO="${SDC_LOCAL_GITREPO}"
+
+# For this to take effect SDC_LOCAL_IMAGES must be enabled...
+#
+# This will set the tag for local images - leaving this empty *AND*
+# with unset SDC_LOCAL_GITREPO the local images will fallback to the
+# tag: 'latest'
+export SDC_LOCAL_TAG="${SDC_LOCAL_TAG}"
+
+# For this to take effect WORKFLOW_LOCAL_IMAGES must be enabled...
+#
+# The path to the local sdc-workflow-designer git repo which will be
+# copied instead of cloning the upstream repository
+export WORKFLOW_LOCAL_GITREPO="${WORKFLOW_LOCAL_GITREPO}"
+
+# run the combined SDC + workflow designer deployment (path quoted in case
+# WORKSPACE contains spaces)
+source "${WORKSPACE}/scripts/sdc-workflow-d/sdc_workflow_d.sh"
diff --git a/plans/sdc-workflow-d/teardown.sh b/plans/sdc-workflow-d/teardown.sh
new file mode 100644
index 0000000..bcc43fe
--- /dev/null
+++ b/plans/sdc-workflow-d/teardown.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Delegate teardown to the shared cleanup script (path quoted for safety)
+source "${WORKSPACE}/scripts/sdc-workflow-d/cleanup_sdc_workflow.sh"
diff --git a/plans/sdc-workflow-d/testplan.txt b/plans/sdc-workflow-d/testplan.txt
new file mode 100644
index 0000000..6d61eb2
--- /dev/null
+++ b/plans/sdc-workflow-d/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration/csit.git]/tests/.
+# Place the suites in run order.
+sdc-workflow-d/test1.robot
diff --git a/scripts/sdc-workflow-d/cleanup_sdc_workflow.sh b/scripts/sdc-workflow-d/cleanup_sdc_workflow.sh
new file mode 100644
index 0000000..3ce28a1
--- /dev/null
+++ b/scripts/sdc-workflow-d/cleanup_sdc_workflow.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Stops and removes the SDC and workflow designer containers started by the
+# E2E test and archives their logs. Sourced by plans/sdc-workflow-d/teardown.sh.
+#
+
+echo "This is ${WORKSPACE}/scripts/sdc-workflow-d/cleanup_sdc_workflow.sh"
+
+# archive the container logs for the CSIT job
+cp -rf "${WORKSPACE}/data/logs/" "${WORKSPACE}/archives/"
+
+ls -Rt "${WORKSPACE}/archives/"
+
+# kill and remove all sdc dockers - guard against an empty container list,
+# 'docker stop' with no argument would otherwise fail
+SDC_CONTAINERS=$(docker ps -a -q --filter="name=sdc")
+if [ -n "${SDC_CONTAINERS}" ]; then
+    docker stop ${SDC_CONTAINERS}
+    docker rm ${SDC_CONTAINERS}
+fi
+
+# kill and remove the workflow designer dockers
+docker stop workflow-frontend workflow-backend
+docker rm workflow-frontend workflow-backend
+
+# delete data folder
+sudo rm -rf "${WORKSPACE}/data"/*
\ No newline at end of file
diff --git a/scripts/sdc-workflow-d/docker_run.sh b/scripts/sdc-workflow-d/docker_run.sh
new file mode 100755
index 0000000..e641594
--- /dev/null
+++ b/scripts/sdc-workflow-d/docker_run.sh
@@ -0,0 +1,606 @@
+#!/bin/bash
+
+#
+# Constants:
+#
+
+# WORKSPACE is normally provided by the CI job; default to empty if unset.
+WORKSPACE="${WORKSPACE:-}"
+SUCCESS=0
+FAILURE=1
+
+# NOTE(review): hard-coded credentials below are the well-known SDC test
+# defaults used across ONAP CSIT - presumably not production secrets; confirm.
+CS_PASSWORD="onap123#@!"
+SDC_USER="asdc_user"
+SDC_PASSWORD="Aa1234%^!"
+
+JETTY_BASE="/var/lib/jetty"
+SDC_CERT_DIR="onap/cert"
+
+# Image tag to pull/run; overridden by -r|--release.
+RELEASE=latest
+
+# LOCAL=true (set by -l|--local) skips 'docker pull' and exposes debug ports.
+LOCAL=false
+BE_DEBUG_PORT="--publish 4000:4000"
+FE_DEBUG_PORT="--publish 6000:6000"
+ONBOARD_DEBUG_PORT="--publish 4001:4001"
+CS_PORT=${CS_PORT:-9042}
+
+
+# Java Options:
+BE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=4000,server=y,suspend=n -Xmx1536m -Xms1536m"
+FE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=6000,server=y,suspend=n -Xmx256m -Xms256m"
+ONBOARD_BE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=4001,server=y,suspend=n -Xmx1g -Xms1g"
+SIM_JAVA_OPTIONS=" -Xmx128m -Xms128m -Xss1m -Dlog4j.configuration=file:///${JETTY_BASE}/config/sdc-simulator/log4j.properties"
+API_TESTS_JAVA_OPTIONS="-Xmx512m -Xms512m"
+UI_TESTS_JAVA_OPTIONS="-Xmx1024m -Xms1024m"
+#Define this as variable, so it can be excluded in run commands on Docker for OSX, as /etc/localtime cant be mounted there.
+LOCAL_TIME_MOUNT_CMD="--volume /etc/localtime:/etc/localtime:ro"
+# If os is OSX, unset this, so /etc/localtime is not mounted, otherwise leave it be
+if [[ "$OSTYPE" == "darwin"* ]]; then
+    LOCAL_TIME_MOUNT_CMD=""
+fi
+
+
+#
+# Functions:
+#
+
+
+function usage {
+    # Print the command-line help. A quoted here-doc keeps the text free of
+    # shell expansion and is easier to extend than repeated echo calls.
+    cat <<'USAGE'
+usage: docker_run.sh [ -r|--release <RELEASE-NAME> ] [ -e|--environment <ENV-NAME> ] [ -p|--port <Docker-hub-port>] [ -l|--local <Run-without-pull>] [ -sim|--simulator <Run-with-simulator>] [ -ta <run api tests with the supplied test suit>] [ -tu <run ui tests with the supplied test suit>] [ -ta <run api tests with the supplied test suit>] [ -tu <run ui tests with the supplied test suit>] [ -tad <run api tests with the default test suit>] [ -tu <run ui tests with the default test suit>] [ -h|--help ]
+start dockers built locally example: docker_run.sh -l
+start dockers built locally and simulator example: docker_run.sh -l -sim
+start dockers, pull from onap nexus according to release and simulator example: docker_run.sh -r 1.5-STAGING-latest -sim
+start dockers built locally and run api tests docker example: docker_run.sh -l -tad
+start dockers built locally and run only the catalog be example: docker_run.sh -l -d sdc-BE 
+USAGE
+}
+#
+
+
+function cleanup {
+    # Removes stale containers. 'cleanup all' wipes every SDC-related
+    # container; 'cleanup <name>' removes only the matching one(s).
+    echo "Performing old dockers cleanup"
+
+    if [ "$1" == "all" ] ; then
+        # NOTE(review): the original pattern contained 'Exit}' - assumed to
+        # be a typo for 'Exited' (the 'docker ps' status prefix); confirm.
+        docker_ids=$(docker ps -a | grep -E "ecomp-nexus:${PORT}/sdc|sdc|Exited|dcae" | awk '{print $1}')
+        for X in ${docker_ids}
+        do
+            docker rm -f ${X}
+        done
+    else
+        echo "performing $1 docker cleanup"
+        tmp=$(docker ps -a -q --filter="name=$1")
+        if [[ ! -z "$tmp" ]]; then
+            docker rm -f ${tmp}
+        fi
+    fi
+}
+#
+
+
+function dir_perms {
+    # Create the log directory tree expected by the containers and make the
+    # logs world-writable so the container users can write into the mounts.
+    local log_dir
+    for log_dir in \
+        BE/SDC/SDC-BE \
+        FE/SDC/SDC-FE \
+        sdc-api-tests/ExtentReport \
+        ONBOARD/SDC/ONBOARD-BE \
+        sdc-api-tests/target \
+        sdc-ui-tests/ExtentReport \
+        sdc-ui-tests/target \
+        docker_logs \
+        WS ; do
+        mkdir -p "${WORKSPACE}/data/logs/${log_dir}"
+    done
+    echo "Creating dir '${WORKSPACE}/data/${SDC_CERT_DIR}'"
+    mkdir -p "${WORKSPACE}/data/${SDC_CERT_DIR}"
+    chmod -R 777 "${WORKSPACE}/data/logs"
+}
+#
+
+
+function docker_logs {
+    # Capture the full log of container $1 into the archived docker_logs dir.
+    docker logs "$1" > "${WORKSPACE}/data/logs/docker_logs/$1_docker.log"
+}
+#
+
+
+#
+# Readiness Prob
+#
+
+function ready_probe {
+    # Container $1 is ready once its bundled probe script exits 0; $2 is the
+    # elapsed wait time, used only for the log message.
+    if docker exec $1 /var/lib/ready-probe.sh > /dev/null 2>&1 ; then
+        echo DOCKER $1 start finished in $2 seconds
+        return ${SUCCESS}
+    fi
+    return ${FAILURE}
+}
+#
+
+function ready_probe_jetty {
+    # Same contract as ready_probe, but for jetty-based images whose probe
+    # script lives under the jetty base directory.
+    if docker exec $1 /var/lib/jetty/ready-probe.sh > /dev/null 2>&1 ; then
+        echo DOCKER $1 start finished in $2 seconds
+        return ${SUCCESS}
+    fi
+    return ${FAILURE}
+}
+#
+
+function probe_docker {
+    # A generic container counts as started once "DOCKER STARTED" shows up
+    # in its recent log output; $2 is elapsed seconds for the log message.
+    MATCH=$(docker logs --tail 30 $1 | grep "DOCKER STARTED")
+    echo MATCH is -- ${MATCH}
+
+    if [ -z "$MATCH" ] ; then
+        return ${FAILURE}
+    fi
+    echo DOCKER start finished in $2 seconds
+    return ${SUCCESS}
+}
+#
+
+function probe_test_docker {
+    # This expected logging should be output by startup.sh of the
+    # respective test docker container.
+    MATCH=$(docker logs --tail 30 $1 | grep "Startup completed successfully")
+    echo MATCH is -- ${MATCH}
+
+    if [ -z "$MATCH" ] ; then
+        return ${FAILURE}
+    fi
+    echo TEST DOCKER start finished in $2 seconds
+    return ${SUCCESS}
+}
+#
+
+
+function probe_sim {
+    # The simulator is detected by something listening on TCP port 8285.
+    if ! lsof -Pi :8285 -sTCP:LISTEN -t >/dev/null ; then
+        echo "Not running"
+        return ${FAILURE}
+    fi
+    echo "Already running"
+    return ${SUCCESS}
+}
+#
+
+
+function monitor_docker {
+    # Poll container $1 with the probe matching its image type until the
+    # probe succeeds or TIME_OUT seconds elapse; always dump the container
+    # log afterwards. A timeout only warns - it does not abort the script.
+    DOCKER_NAME=$1
+    echo "Monitor ${DOCKER_NAME} Docker"
+    sleep 5
+    TIME_OUT=900
+    INTERVAL=20
+    TIME=0
+
+    while [ "$TIME" -lt "$TIME_OUT" ]; do
+
+        # Identical probe arms merged with '|' patterns (was three copies
+        # each of the jetty and test-docker arms).
+        case ${DOCKER_NAME} in
+
+            sdc-cs)
+                ready_probe ${DOCKER_NAME} ${TIME} ;
+                status=$? ;
+                ;;
+            sdc-BE | sdc-FE | sdc-onboard-BE)
+                ready_probe_jetty ${DOCKER_NAME} ${TIME} ;
+                status=$? ;
+                ;;
+            sdc-api-tests | sdc-ui-tests)
+                probe_test_docker ${DOCKER_NAME} ${TIME};
+                status=$? ;
+                ;;
+            *)
+                probe_docker ${DOCKER_NAME} ${TIME};
+                status=$? ;
+                ;;
+
+        esac
+
+        if [[ ${status} == ${SUCCESS} ]] ; then
+            break;
+        fi
+
+        echo "Sleep: ${INTERVAL} seconds before testing if ${DOCKER_NAME} DOCKER is up. Total wait time up now is: ${TIME} seconds. Timeout is: ${TIME_OUT} seconds"
+        sleep ${INTERVAL}
+        TIME=$(($TIME+$INTERVAL))
+    done
+
+    docker_logs ${DOCKER_NAME}
+
+    if [ "$TIME" -ge "$TIME_OUT" ]; then
+        echo -e "\e[1;31mTIME OUT: DOCKER was NOT fully started in $TIME_OUT seconds... Could cause problems ...\e[0m"
+    fi
+}
+#
+
+# healthCheck uses the secure connection to send requests (https is always
+# turned on). Prints BE and FE health check responses, then verifies that the
+# 'demo' user exists, returning the HTTP status code of that last request.
+# NOTE(review): a shell return value is truncated to 0-255, so only the
+# ==200 comparison made by callers is reliable; other codes (404, 500...)
+# arrive modulo 256.
+function healthCheck {
+
+    echo "BE Health Check:"
+    curl -k --noproxy "*" https://${IP}:8443/sdc2/rest/healthCheck
+
+    echo ""
+    echo ""
+    echo "FE Health Check:"
+    curl -k --noproxy "*" https://${IP}:9443/sdc1/rest/healthCheck
+
+
+    echo ""
+    echo ""
+    # -o /dev/null -w '%{http_code}' keeps only the status code of the call
+    healthCheck_http_code=$(curl -k --noproxy "*" -o /dev/null -w '%{http_code}' -H "Accept: application/json" -H "Content-Type: application/json" -H "USER_ID: jh0003" https://${IP}:8443/sdc2/rest/v1/user/demo;)
+    if [[ ${healthCheck_http_code} != 200 ]]; then
+        echo "Error [${healthCheck_http_code}] while checking existence of user"
+        return ${healthCheck_http_code}
+    fi
+    echo "check user existence: OK"
+    return ${healthCheck_http_code}
+}
+#
+
+
+function command_exit_status {
+    # Log an error when a 'docker run' for container $2 exited with a
+    # non-zero status $1. Purely informational - does not abort.
+    status=$1
+    docker=$2
+    case "${status}" in
+        0) ;;
+        *) echo "[ ERROR ] Docker ${docker} run command exit with status [${status}]" ;;
+    esac
+}
+#
+
+
+#
+# Run Containers
+#
+
+
+#Cassandra
+# Starts the Cassandra container (detached) and waits for its readiness probe.
+function sdc-cs {
+    DOCKER_NAME="sdc-cs"
+    echo "docker run sdc-cassandra..."
+    if [ ${LOCAL} = false ]; then
+        docker pull ${PREFIX}/sdc-cassandra:${RELEASE}
+    fi
+    docker run -dit --name ${DOCKER_NAME} --env RELEASE="${RELEASE}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --env MAX_HEAP_SIZE="1536M" --env HEAP_NEWSIZE="512M" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish ${CS_PORT}:${CS_PORT} ${PREFIX}/sdc-cassandra:${RELEASE} /bin/sh
+    command_exit_status $? ${DOCKER_NAME}
+    echo "please wait while CS is starting..."
+    monitor_docker ${DOCKER_NAME}
+}
+#
+
+
+#Cassandra-init
+# One-shot schema/user init job; a non-zero exit aborts the whole script.
+function sdc-cs-init {
+    DOCKER_NAME="sdc-cs-init"
+    echo "docker run sdc-cassandra-init..."
+    if [ ${LOCAL} = false ]; then
+        docker pull ${PREFIX}/sdc-cassandra-init:${RELEASE}
+    fi
+    docker run --name ${DOCKER_NAME} --env RELEASE="${RELEASE}" --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/home/sdc/chef-solo/environments --volume ${WORKSPACE}/data/CS-Init:/root/chef-solo/cache ${PREFIX}/sdc-cassandra-init:${RELEASE} > /dev/null 2>&1
+    rc=$?
+    docker_logs ${DOCKER_NAME}
+    if [[ ${rc} != 0 ]]; then exit ${rc}; fi
+}
+#
+
+
+#Onboard Cassandra-init
+# One-shot init job for the onboarding keyspace; aborts the script on failure.
+function sdc-cs-onboard-init {
+    DOCKER_NAME="sdc-cs-onboard-init"
+    echo "docker run sdc-cs-onboard-init..."
+    if [ ${LOCAL} = false ]; then
+        docker pull ${PREFIX}/sdc-onboard-cassandra-init:${RELEASE}
+    fi
+    docker run --name ${DOCKER_NAME} --env RELEASE="${RELEASE}" --env CS_HOST_IP=${IP} --env CS_HOST_PORT=${CS_PORT} --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/home/sdc/chef-solo/environments --volume ${WORKSPACE}/data/CS-Init:/home/sdc/chef-solo/cache ${PREFIX}/sdc-onboard-cassandra-init:${RELEASE}
+    rc=$?
+    docker_logs ${DOCKER_NAME}
+    if [[ ${rc} != 0 ]]; then exit ${rc}; fi
+}
+#
+
+
+#Back-End
+# Starts the catalog backend (detached) and waits for its jetty ready probe.
+# NOTE(review): ADDITIONAL_ARGUMENTS (the debug port mapping) is only set in
+# local mode; in non-local mode it expands to whatever value it already had
+# in the environment - presumably empty; confirm.
+function sdc-BE {
+    DOCKER_NAME="sdc-BE"
+    echo "docker run sdc-backend..."
+    if [ ${LOCAL} = false ]; then
+        docker pull ${PREFIX}/sdc-backend:${RELEASE}
+    else
+        ADDITIONAL_ARGUMENTS=${BE_DEBUG_PORT}
+    fi
+    docker run --detach --name ${DOCKER_NAME} --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env cassandra_ssl_enabled="false" --env JAVA_OPTIONS="${BE_JAVA_OPTIONS}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/logs/BE/:${JETTY_BASE}/logs --volume ${WORKSPACE}/data/environments:${JETTY_BASE}/chef-solo/environments --publish 8443:8443 --publish 8080:8080 ${ADDITIONAL_ARGUMENTS} ${PREFIX}/sdc-backend:${RELEASE}
+    command_exit_status $? ${DOCKER_NAME}
+    echo "please wait while BE is starting..."
+    monitor_docker ${DOCKER_NAME}
+}
+#
+
+
+# Back-End-Init
+# One-shot backend init job (users, consumers...); aborts the script on failure.
+function sdc-BE-init {
+    DOCKER_NAME="sdc-BE-init"
+    echo "docker run sdc-backend-init..."
+    if [ ${LOCAL} = false ]; then
+        docker pull ${PREFIX}/sdc-backend-init:${RELEASE}
+    fi
+    docker run --name ${DOCKER_NAME} --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/logs/BE/:${JETTY_BASE}/logs --volume ${WORKSPACE}/data/environments:/home/sdc/chef-solo/environments ${PREFIX}/sdc-backend-init:${RELEASE} > /dev/null 2>&1
+    rc=$?
+    docker_logs ${DOCKER_NAME}
+    if [[ ${rc} != 0 ]]; then exit ${rc}; fi
+}
+#
+
+
+# Onboard Back-End
+# Starts the onboarding backend (detached) and waits for its jetty ready probe.
+function sdc-onboard-BE {
+    DOCKER_NAME="sdc-onboard-BE"
+    echo "docker run sdc-onboard-BE ..."
+# TODO Check the dir_perms action . do we need it here ??
+# dir_perms
+    if [ ${LOCAL} = false ]; then
+        docker pull ${PREFIX}/sdc-onboard-backend:${RELEASE}
+    else
+        ADDITIONAL_ARGUMENTS=${ONBOARD_DEBUG_PORT}
+    fi
+    docker run --detach --name ${DOCKER_NAME} --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env cassandra_ssl_enabled="false" --env SDC_CLUSTER_NAME="SDC-CS-${DEP_ENV}" --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env SDC_CERT_DIR="${SDC_CERT_DIR}" --env JAVA_OPTIONS="${ONBOARD_BE_JAVA_OPTIONS}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/${SDC_CERT_DIR}:${JETTY_BASE}/onap/cert --volume ${WORKSPACE}/data/logs/ONBOARD:${JETTY_BASE}/logs --volume ${WORKSPACE}/data/environments:/${JETTY_BASE}/chef-solo/environments --publish 8445:8445 --publish 8081:8081 ${ADDITIONAL_ARGUMENTS} ${PREFIX}/sdc-onboard-backend:${RELEASE}
+    command_exit_status $? ${DOCKER_NAME}
+    echo "please wait while sdc-onboard-BE is starting..."
+    monitor_docker ${DOCKER_NAME}
+}
+#
+
+
+# Front-End
+function sdc-FE {
+    # Starts the SDC frontend container; mounts plugins-configuration.yaml
+    # when present (the workflow test generates it before restarting sdc-FE).
+    DOCKER_NAME="sdc-FE"
+    IMAGE_NAME="${PREFIX}/sdc-frontend:${RELEASE}"
+    echo "Running container '${DOCKER_NAME}' based on '${IMAGE_NAME}' image..."
+    if [ ${LOCAL} = false ]; then
+        docker pull ${IMAGE_NAME}
+    else
+        ADDITIONAL_ARGUMENTS=${FE_DEBUG_PORT}
+    fi
+
+    # Explicitly reset the mount option so a value leaking in from the
+    # caller's environment cannot be passed to 'docker run'.
+    PLUGINS_CONF_VOLUME_MOUNT=""
+    PLUGIN_CONFIG_FILE="${WORKSPACE}/data/environments/plugins-configuration.yaml"
+    # Use the variable instead of repeating the literal path in the test.
+    if [[ -f "${PLUGIN_CONFIG_FILE}" ]]; then
+        PLUGINS_CONF_VOLUME_MOUNT="--volume ${PLUGIN_CONFIG_FILE}:${JETTY_BASE}/config/catalog-fe/plugins-configuration.yaml"
+    else
+        echo "INFO: '${PLUGIN_CONFIG_FILE}' not provided, ignoring..."
+    fi
+
+    docker run \
+        --detach \
+        --name ${DOCKER_NAME} \
+        --env HOST_IP=${IP} \
+        --env ENVNAME="${DEP_ENV}" \
+        --env JAVA_OPTIONS="${FE_JAVA_OPTIONS}" \
+        --log-driver=json-file \
+        --log-opt max-size=100m \
+        --log-opt max-file=10 \
+        --ulimit memlock=-1:-1 \
+        --ulimit nofile=4096:100000 \
+        --volume ${WORKSPACE}/data/logs/FE/:${JETTY_BASE}/logs \
+        --volume ${WORKSPACE}/data/environments:/${JETTY_BASE}/chef-solo/environments \
+        ${LOCAL_TIME_MOUNT_CMD} \
+        ${PLUGINS_CONF_VOLUME_MOUNT} \
+        --publish 9443:9443 \
+        --publish 8181:8181 \
+        ${ADDITIONAL_ARGUMENTS} \
+        ${IMAGE_NAME}
+
+    command_exit_status $? ${DOCKER_NAME}
+    echo "Please wait while '${DOCKER_NAME}' container is starting..."
+    monitor_docker ${DOCKER_NAME}
+}
+#
+
+
+# apis-sanity
+function sdc-api-tests {
+    # Runs the API sanity suite container, but only when -ta/-tad was given
+    # and the platform health check returns 200.
+    if [[ ${RUN_API_TESTS} = true ]] ; then
+        healthCheck
+        healthCheck_http_code=$?
+        if [[ ${healthCheck_http_code} == 200 ]] ; then
+            echo "docker run sdc-api-tests..."
+            echo "Trigger sdc-api-tests docker, please wait..."
+
+            if [ ${LOCAL} = false ]; then
+                docker pull ${PREFIX}/sdc-api-tests:${RELEASE}
+            fi
+
+            # BUG FIX: the trailing 'echo' used to sit on the same line as
+            # 'docker run', so it was passed to the image as the container
+            # command (overriding its entrypoint arguments) instead of being
+            # printed by this script.
+            docker run --detach --name sdc-api-tests --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env JAVA_OPTIONS="${API_TESTS_JAVA_OPTIONS}" --env SUITE_NAME=${API_SUITE} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/logs/sdc-api-tests/target:/var/lib/tests/target --volume ${WORKSPACE}/data/logs/sdc-api-tests/ExtentReport:/var/lib/tests/ExtentReport --volume ${WORKSPACE}/data/logs/sdc-api-tests/outputCsar:/var/lib/tests/outputCsar --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --volume ${WORKSPACE}/data/${SDC_CERT_DIR}:/var/lib/tests/cert --publish 9560:9560 ${PREFIX}/sdc-api-tests:${RELEASE}
+            echo "please wait while SDC-API-TESTS is starting....."
+            monitor_docker sdc-api-tests
+        fi
+    fi
+}
+#
+
+
+# ui-sanity
+function sdc-ui-tests {
+    # Runs the UI sanity suite container (plus the simulator it needs), but
+    # only when -tu/-tud was given and the platform health check returns 200.
+    if [[ ${RUN_UI_TESTS} = true ]] ; then
+        healthCheck
+        healthCheck_http_code=$?
+        if [[ ${healthCheck_http_code} == 200 ]]; then
+            # typo fix: was "sdc-ui-tets"
+            echo "docker run sdc-ui-tests..."
+            echo "Trigger sdc-ui-tests docker, please wait..."
+
+            if [ ${LOCAL} = false ]; then
+                docker pull ${PREFIX}/sdc-ui-tests:${RELEASE}
+            fi
+            # The UI suite drives the web simulator, so force-start it first.
+            RUN_SIMULATOR=true;
+            sdc-sim
+            docker run --detach --name sdc-ui-tests --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env JAVA_OPTIONS="${UI_TESTS_JAVA_OPTIONS}" --env SUITE_NAME=${UI_SUITE} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/logs/sdc-ui-tests/target:/var/lib/tests/target --volume ${WORKSPACE}/data/logs/sdc-ui-tests/ExtentReport:/var/lib/tests/ExtentReport --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 5901:5901 --publish 6901:6901 ${PREFIX}/sdc-ui-tests:${RELEASE}
+            echo "please wait while SDC-UI-TESTS is starting....."
+            monitor_docker sdc-ui-tests
+        fi
+    fi
+}
+#
+
+
+# SDC-Simulator
+# Starts the web simulator unless something is already listening on 8285.
+function sdc-sim {
+    if [ "${RUN_SIMULATOR}" == true ]; then
+        echo "docker run sdc-webSimulator..."
+        if [ ${LOCAL} = false ]; then
+            docker pull ${PREFIX}/sdc-simulator:${RELEASE}
+        fi
+
+        probe_sim
+        sim_stat=$?
+        # probe_sim returns 1 (FAILURE) when port 8285 is free, i.e. the
+        # simulator is not yet running and must be started.
+        if [ ${sim_stat} == 1 ]; then
+            docker run \
+                --detach \
+                --name sdc-sim \
+                --env FE_URL="${FE_URL}" \
+                --env JAVA_OPTIONS="${SIM_JAVA_OPTIONS}" \
+                --env ENVNAME="${DEP_ENV}" \
+                ${LOCAL_TIME_MOUNT_CMD} \
+                --volume ${WORKSPACE}/data/logs/WS/:${JETTY_BASE}/logs \
+                --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments \
+                --publish 8285:8080 \
+                --publish 8286:8443 ${PREFIX}/sdc-simulator:${RELEASE}
+            echo "please wait while SDC-WEB-SIMULATOR is starting....."
+            monitor_docker sdc-sim
+        fi
+    fi
+}
+#
+
+
+#
+# Main
+#
+
+# Handle command line arguments
+while [ $# -gt 0 ]; do
+    case $1 in
+
+    # -r | --release - The specific docker version to pull and deploy
+    -r | --release )
+        shift 1 ;
+        RELEASE=$1;
+        shift 1;;
+
+    # -e | --environment - The environment name you want to deploy
+    -e | --environment )
+        shift 1;
+        DEP_ENV=$1;
+        shift 1 ;;
+
+    # -p | --port - The port from which to connect to the docker nexus
+    -p | --port )
+        shift 1 ;
+        PORT=$1;
+        shift 1 ;;
+
+    # -l | --local - Use this for deploying your local dockers without pulling them first
+    -l | --local )
+        LOCAL=true;
+        shift 1;;
+
+    # -ta - Use this for running the APIs sanity docker after all other dockers have been deployed
+    -ta )
+        shift 1 ;
+        API_SUITE=$1;
+        RUN_API_TESTS=true;
+        shift 1 ;;
+
+    # -tu - Use this for running the UI sanity docker after all other dockers have been deployed
+    -tu )
+        shift 1 ;
+        UI_SUITE=$1;
+        RUN_UI_TESTS=true;
+        shift 1 ;;
+
+    # -tad - Use this for running the DEFAULT suite of tests in APIs sanity docker after all other dockers have been deployed
+    -tad | -t )
+        API_SUITE="onapApiSanity";
+        RUN_API_TESTS=true;
+        shift 1 ;;
+
+    # -tud - Use this for running the DEFAULT suite of tests in UI sanity docker after all other dockers have been deployed
+    -tud )
+        UI_SUITE="onapUiSanity";
+        RUN_UI_TESTS=true;
+        shift 1 ;;
+
+    # -d | --docker - The init specified docker
+    -d | --docker )
+        shift 1 ;
+        DOCKER=$1;
+        shift 1 ;;
+    # -sim | --simulator run the simulator
+    -sim | --simulator )
+        RUN_SIMULATOR=true;
+        shift 1 ;;
+    # -u | --fe_url - The FE URL the simulator should use
+    # (comment fixed: was a copy-paste of the -sim description)
+    -u | --fe_url )
+        shift 1 ;
+        FE_URL=$1;
+        shift 1 ;;
+
+    # -h | --help - Display the help message with all the available run options
+    -h | --help )
+        usage;
+        exit ${SUCCESS};;
+
+    * )
+        usage;
+        exit ${FAILURE};;
+    esac
+done
+
+
+#Prefix those with WORKSPACE so it can be set to something other than /opt
+# NOTE(review): the nexus credentials below are the well-known public ONAP
+# CI read-only defaults, not secrets.
+[ -f "${WORKSPACE}/opt/config/env_name.txt" ] && DEP_ENV=$(cat "${WORKSPACE}/opt/config/env_name.txt") || echo "${DEP_ENV}"
+[ -f "${WORKSPACE}/opt/config/nexus_username.txt" ] && NEXUS_USERNAME=$(cat "${WORKSPACE}/opt/config/nexus_username.txt") || NEXUS_USERNAME=release
+[ -f "${WORKSPACE}/opt/config/nexus_password.txt" ] && NEXUS_PASSWD=$(cat "${WORKSPACE}/opt/config/nexus_password.txt") || NEXUS_PASSWD=sfWU3DFVdBr7GVxB85mTYgAW
+[ -f "${WORKSPACE}/opt/config/nexus_docker_repo.txt" ] && NEXUS_DOCKER_REPO=$(cat "${WORKSPACE}/opt/config/nexus_docker_repo.txt") || NEXUS_DOCKER_REPO=nexus3.onap.org:${PORT}
+[ -f "${WORKSPACE}/opt/config/nexus_username.txt" ] && docker login -u "$NEXUS_USERNAME" -p "$NEXUS_PASSWD" "$NEXUS_DOCKER_REPO"
+
+
+# Host IP as seen on the default route ($() instead of backticks)
+export IP=$(ip route get 8.8.8.8 | awk '/src/{ print $7 }')
+#If OSX, then use this to get IP
+if [[ "$OSTYPE" == "darwin"* ]]; then
+    export IP=$(ipconfig getifaddr en0)
+fi
+export PREFIX=${NEXUS_DOCKER_REPO}'/onap'
+
+if [ ${LOCAL} = true ]; then
+    PREFIX='onap'
+fi
+
+echo ""
+
+# Without -d|--docker run the full deployment in dependency order; otherwise
+# run only the requested container function.
+if [ -z "${DOCKER}" ]; then
+    cleanup all
+    dir_perms
+    sdc-cs
+    sdc-cs-init
+    sdc-cs-onboard-init
+    sdc-onboard-BE
+    sdc-BE
+    sdc-BE-init
+    sdc-FE
+    healthCheck
+    sdc-sim
+    sdc-api-tests
+    sdc-ui-tests
+else
+    cleanup ${DOCKER}
+    dir_perms
+    ${DOCKER}
+    healthCheck
+fi
diff --git a/scripts/sdc-workflow-d/sdc_workflow_d.sh b/scripts/sdc-workflow-d/sdc_workflow_d.sh
new file mode 100644
index 0000000..6432090
--- /dev/null
+++ b/scripts/sdc-workflow-d/sdc_workflow_d.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+#
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+echo "[INFO] This is sdc_workflow_d.sh"
+# run sdc deployment
+source "${WORKSPACE}/scripts/sdc/setup_sdc_for_sanity.sh"
+# ROBOT_VARIABLES is presumably populated by the sourced setup script and
+# consumed by the CSIT robot runner - TODO confirm
+export ROBOT_VARIABLES
+
+# fail quick if error
+set -exo pipefail
+
+export ENV_NAME='CSIT'
+
+# Check whether the variable named by $1 holds a "true-ish" value.
+# The value is lowercased first, so the match is case-insensitive.
+# Returns 0 for 1/y/yes/true (any case), 1 otherwise.
+function iss_true {
+    # BUG FIX: the original matched 'Y' AFTER lowercasing (unreachable) and
+    # rejected a plain lowercase 'y'. Also use bash indirect expansion
+    # instead of eval.
+    _value=$(echo "${!1}" | tr '[:upper:]' '[:lower:]')
+
+    case "$_value" in
+        1|y|yes|true)
+            return 0
+            ;;
+    esac
+
+    return 1
+}
+
+# returns 0: if WORKFLOW_LOCAL_IMAGES is set to true value
+# returns 1: otherwise
+# (comment fixed: it previously referred to SDC_LOCAL_IMAGES, but the
+# variable actually checked is WORKFLOW_LOCAL_IMAGES)
+function using_local_workflow_images {
+    iss_true WORKFLOW_LOCAL_IMAGES
+}
+
+# Obtain the workflow designer sources under ${WORKSPACE}/data/clone/workflow:
+# either copy a local git repo (WORKFLOW_LOCAL_IMAGES + WORKFLOW_LOCAL_GITREPO)
+# or clone the upstream branch matching GERRIT_BRANCH.
+mkdir -p "${WORKSPACE}/data/clone/"
+cd "${WORKSPACE}/data/clone"
+if using_local_workflow_images && [ -n "$WORKFLOW_LOCAL_GITREPO" ] ; then
+    WORKFLOW_LOCAL_GITREPO=$(realpath "$WORKFLOW_LOCAL_GITREPO")
+    if [ -d "$WORKFLOW_LOCAL_GITREPO" ] ; then
+        rm -rf ./workflow
+        cp -a "$WORKFLOW_LOCAL_GITREPO" ./workflow
+    else
+        echo "[ERROR]: Local git repo for workflow does not exist: ${WORKFLOW_LOCAL_GITREPO}"
+        exit 1
+    fi
+else
+    # BUG FIX: clone into './workflow' explicitly - git's default target
+    # directory would be 'sdc-sdc-workflow-designer', but version.properties
+    # is sourced from .../clone/workflow below.
+    git clone --depth 1 "https://github.com/onap/sdc-sdc-workflow-designer.git" -b "${GERRIT_BRANCH}" workflow
+fi
+# set environment variables
+source "${WORKSPACE}/data/clone/workflow/version.properties"
+export WORKFLOW_RELEASE=$major.$minor-STAGING-latest
+
+# Resolve the container IPs of the already-running SDC Cassandra and backend
+SDC_CS=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sdc-cs)
+SDC_BE=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sdc-BE)
+echo "[INFO] Initialization of workflow init"
+echo "${SDC_CS}"
+echo "${SDC_BE}"
+# No '-t': this runs under a CI executor without a TTY, where 'docker run -t'
+# fails with "the input device is not a TTY".
+docker run -i \
+    -e "CS_HOST=${SDC_CS}" \
+    -e "CS_PORT=9042" \
+    -e "CS_AUTHENTICATE=true" \
+    -e "CS_USER=asdc_user" \
+    -e "CS_PASSWORD=Aa1234%^!" nexus3.onap.org:10001/onap/sdc-workflow-init:latest
+
+echo "[INFO] Initialization of workflow Backend init"
+# NOTE(review): BE_JAVA_OPTIONS is not defined in this script, so
+# JAVA_OPTIONS is presumably empty here - confirm whether the sdc
+# docker_run.sh value was intended.
+docker run -d --name "workflow-backend" -e "SDC_PROTOCOL=http" \
+    -e "SDC_ENDPOINT=${SDC_BE}:8080" \
+    -e "SDC_USER=workflow" \
+    -e "SDC_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U" \
+    -e "CS_HOSTS=${SDC_CS}" \
+    -e "CS_PORT=9042" \
+    -e "CS_AUTHENTICATE=true" \
+    -e "CS_USER=asdc_user" \
+    -e "CS_PASSWORD=Aa1234%^!" \
+    -e "CS_SSL_ENABLED=false" \
+    -e "SERVER_SSL_ENABLED=false" \
+    --env JAVA_OPTIONS="${BE_JAVA_OPTIONS}" --publish 8384:8080 --publish 10443:8443 --publish 8000:8000 nexus3.onap.org:10001/onap/sdc-workflow-backend:latest
+
+WORKFLOW_BE=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' workflow-backend)
+# typo fix: was "fronend"
+echo "[INFO] starting workflow designer frontend"
+docker run -d --name "workflow-frontend" \
+    -e BACKEND="http://${WORKFLOW_BE}:8080" \
+    --publish 8484:8080 --publish 11443:8443 nexus3.onap.org:10001/onap/sdc-workflow-frontend:latest
+
+# The plugins-configuration template comes from the sdc repo - presumably
+# cloned by setup_sdc_for_sanity.sh sourced above; confirm the path.
+cp "${WORKSPACE}/data/clone/sdc/sdc-os-chef/environments/plugins-configuration.yaml" \
+    "${WORKSPACE}/data/environments/plugins-configuration.yaml"
+
+WF_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' workflow-frontend)
+# Plain URL; '|' is used as the sed delimiter below so the slashes no longer
+# need to be escaped inside the address.
+WFADDR="http://${WF_IP}:8080/workflows"
+echo "${WFADDR}"
+sed -i \
+    -e "s|<%= @workflow_discovery_url %>|${WFADDR}|g" \
+    -e "s|<%= @workflow_source_url %>|${WFADDR}|g" \
+    "${WORKSPACE}/data/environments/plugins-configuration.yaml"
+
+cp "${WORKSPACE}/data/clone/sdc/sdc-os-chef/scripts/docker_run.sh" "${WORKSPACE}/scripts/sdc-workflow-d/"
+
+# typo fix: was "Worflow"
+echo "[INFO] restarting sdc-FE with updated plugin configuration file with Workflow host ip"
+docker stop sdc-FE
+"${WORKSPACE}/scripts/sdc-workflow-d/docker_run.sh" \
+    --local \
+    -e "${ENV_NAME}" \
+    -p 10001 -d sdc-FE
+# This file is sourced in another script which is out of our control...
+set +e
+set +o pipefail
diff --git a/scripts/sdc/docker_run.sh b/scripts/sdc/docker_run.sh
new file mode 100755
index 0000000..e641594
--- /dev/null
+++ b/scripts/sdc/docker_run.sh
@@ -0,0 +1,606 @@
+#!/bin/bash
+
+#
+# Constants:
+#
+
+# Jenkins-style workspace root; all data/log paths below hang off it.
+WORKSPACE="${WORKSPACE:-}"
+SUCCESS=0
+FAILURE=1
+
+# NOTE(review): hardcoded CI credentials for Cassandra/SDC; fine for a lab
+# deployment but consider allowing environment overrides.
+CS_PASSWORD="onap123#@!"
+SDC_USER="asdc_user"
+SDC_PASSWORD="Aa1234%^!"
+
+JETTY_BASE="/var/lib/jetty"
+SDC_CERT_DIR="onap/cert"
+
+RELEASE=latest
+
+# LOCAL=true means "use locally built images, do not pull" (-l/--local).
+LOCAL=false
+# Extra --publish flags appended to docker run only in local (debug) mode;
+# the host ports match the JDWP addresses in the *_JAVA_OPTIONS below.
+BE_DEBUG_PORT="--publish 4000:4000"
+FE_DEBUG_PORT="--publish 6000:6000"
+ONBOARD_DEBUG_PORT="--publish 4001:4001"
+CS_PORT=${CS_PORT:-9042}
+
+
+# Java Options:
+# Each *_JAVA_OPTIONS opens a JDWP debug socket matching its *_DEBUG_PORT.
+BE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=4000,server=y,suspend=n -Xmx1536m -Xms1536m"
+FE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=6000,server=y,suspend=n -Xmx256m -Xms256m"
+ONBOARD_BE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=4001,server=y,suspend=n -Xmx1g -Xms1g"
+SIM_JAVA_OPTIONS=" -Xmx128m -Xms128m -Xss1m -Dlog4j.configuration=file:///${JETTY_BASE}/config/sdc-simulator/log4j.properties"
+API_TESTS_JAVA_OPTIONS="-Xmx512m -Xms512m"
+UI_TESTS_JAVA_OPTIONS="-Xmx1024m -Xms1024m"
+#Define this as variable, so it can be excluded in run commands on Docker for OSX, as /etc/localtime cant be mounted there.
+LOCAL_TIME_MOUNT_CMD="--volume /etc/localtime:/etc/localtime:ro"
+# If os is OSX, unset this, so /etc/localtime is not mounted, otherwise leave it be
+if [[ "$OSTYPE" == "darwin"* ]]; then
+ LOCAL_TIME_MOUNT_CMD=""
+fi
+
+
+#
+# Functions:
+#
+
+
+function usage {
+ echo "usage: docker_run.sh [ -r|--release <RELEASE-NAME> ] [ -e|--environment <ENV-NAME> ] [ -p|--port <Docker-hub-port>] [ -l|--local <Run-without-pull>] [ -sim|--simulator <Run-with-simulator>] [ -ta <run api tests with the supplied test suit>] [ -tu <run ui tests with the supplied test suit>] [ -ta <run api tests with the supplied test suit>] [ -tu <run ui tests with the supplied test suit>] [ -tad <run api tests with the default test suit>] [ -tu <run ui tests with the default test suit>] [ -h|--help ]"
+ echo "start dockers built locally example: docker_run.sh -l"
+ echo "start dockers built locally and simulator example: docker_run.sh -l -sim"
+ echo "start dockers, pull from onap nexus according to release and simulator example: docker_run.sh -r 1.5-STAGING-latest -sim"
+ echo "start dockers built locally and run api tests docker example: docker_run.sh -l -tad"
+ echo "start dockers built locally and run only the catalog be example: docker_run.sh -l -d sdc-BE "
+}
+#
+
+
+function cleanup {
+ echo "Performing old dockers cleanup"
+
+ if [ "$1" == "all" ] ; then
+ docker_ids=`docker ps -a | egrep "ecomp-nexus:${PORT}/sdc|sdc|Exit}|dcae" | awk '{print $1}'`
+ for X in ${docker_ids}
+ do
+ docker rm -f ${X}
+ done
+ else
+ echo "performing $1 docker cleanup"
+ tmp=`docker ps -a -q --filter="name=$1"`
+ if [[ ! -z "$tmp" ]]; then
+ docker rm -f ${tmp}
+ fi
+ fi
+}
+#
+
+
+function dir_perms {
+ mkdir -p ${WORKSPACE}/data/logs/BE/SDC/SDC-BE
+ mkdir -p ${WORKSPACE}/data/logs/FE/SDC/SDC-FE
+ mkdir -p ${WORKSPACE}/data/logs/sdc-api-tests/ExtentReport
+ mkdir -p ${WORKSPACE}/data/logs/ONBOARD/SDC/ONBOARD-BE
+ mkdir -p ${WORKSPACE}/data/logs/sdc-api-tests/target
+ mkdir -p ${WORKSPACE}/data/logs/sdc-ui-tests/ExtentReport
+ mkdir -p ${WORKSPACE}/data/logs/sdc-ui-tests/target
+ mkdir -p ${WORKSPACE}/data/logs/docker_logs
+ mkdir -p ${WORKSPACE}/data/logs/WS
+ echo "Creating dir '${WORKSPACE}/data/${SDC_CERT_DIR}'"
+ mkdir -p ${WORKSPACE}/data/${SDC_CERT_DIR}
+ chmod -R 777 ${WORKSPACE}/data/logs
+}
+#
+
+
+# Capture the full docker logs of container $1 into the shared log directory.
+function docker_logs {
+ docker logs "$1" > "${WORKSPACE}/data/logs/docker_logs/$1_docker.log"
+}
+#
+
+
+#
+# Readiness probe helpers
+#
+
+# Run the readiness probe shipped inside container $1; $2 is the elapsed
+# time in seconds, used only for the progress message.
+function ready_probe {
+ if docker exec $1 /var/lib/ready-probe.sh > /dev/null 2>&1; then
+ echo DOCKER $1 start finished in $2 seconds
+ return ${SUCCESS}
+ fi
+ return ${FAILURE}
+}
+#
+
+function ready_probe_jetty {
+ docker exec $1 /var/lib/jetty/ready-probe.sh > /dev/null 2>&1
+ rc=$?
+ if [[ ${rc} == 0 ]]; then
+ echo DOCKER $1 start finished in $2 seconds
+ return ${SUCCESS}
+ fi
+ return ${FAILURE}
+}
+#
+
+function probe_docker {
+ MATCH=`docker logs --tail 30 $1 | grep "DOCKER STARTED"`
+ echo MATCH is -- ${MATCH}
+
+ if [ -n "$MATCH" ] ; then
+ echo DOCKER start finished in $2 seconds
+ return ${SUCCESS}
+ fi
+ return ${FAILURE}
+}
+#
+
+# Readiness check for the api/ui test containers.
+function probe_test_docker {
+ # This expected logging should be output by startup.sh of the
+ # respective test docker container
+ MATCH=$(docker logs --tail 30 "$1" | grep "Startup completed successfully")
+ echo MATCH is -- ${MATCH}
+
+ if [ -n "$MATCH" ] ; then
+ echo TEST DOCKER start finished in $2 seconds
+ return ${SUCCESS}
+ fi
+ return ${FAILURE}
+}
+#
+
+
+# Succeed when something is already listening on the simulator port 8285.
+function probe_sim {
+ if lsof -Pi :8285 -sTCP:LISTEN -t >/dev/null ; then
+ echo "Already running"
+ return ${SUCCESS}
+ fi
+ echo "Not running"
+ return ${FAILURE}
+}
+#
+
+
+# Poll container $1 with the readiness probe appropriate for its type until
+# the probe succeeds or TIME_OUT seconds elapse.  Always dumps the container
+# logs at the end; a timeout only warns, it does not abort the script.
+function monitor_docker {
+ DOCKER_NAME=$1
+ echo "Monitor ${DOCKER_NAME} Docker"
+ sleep 5
+ TIME_OUT=900
+ INTERVAL=20
+ TIME=0
+
+ while [ "$TIME" -lt "$TIME_OUT" ]; do
+
+ # Identical probe arms are collapsed with '|' patterns (the previous
+ # version repeated the same body for BE/FE/onboard-BE and api/ui tests).
+ case ${DOCKER_NAME} in
+
+ sdc-cs)
+ ready_probe ${DOCKER_NAME} ${TIME} ;
+ status=$? ;
+ ;;
+ sdc-BE | sdc-FE | sdc-onboard-BE)
+ ready_probe_jetty ${DOCKER_NAME} ${TIME} ;
+ status=$? ;
+ ;;
+ sdc-api-tests | sdc-ui-tests)
+ probe_test_docker ${DOCKER_NAME} ${TIME};
+ status=$? ;
+ ;;
+ *)
+ probe_docker ${DOCKER_NAME} ${TIME};
+ status=$? ;
+ ;;
+
+ esac
+
+ if [[ ${status} == ${SUCCESS} ]] ; then
+ break;
+ fi
+
+ echo "Sleep: ${INTERVAL} seconds before testing if ${DOCKER_NAME} DOCKER is up. Total wait time up now is: ${TIME} seconds. Timeout is: ${TIME_OUT} seconds"
+ sleep ${INTERVAL}
+ TIME=$(($TIME+$INTERVAL))
+ done
+
+ docker_logs ${DOCKER_NAME}
+
+ if [ "$TIME" -ge "$TIME_OUT" ]; then
+ echo -e "\e[1;31mTIME OUT: DOCKER was NOT fully started in $TIME_OUT seconds... Could cause problems ...\e[0m"
+ fi
+}
+#
+
+# Run the SDC BE and FE health checks over HTTPS (HTTPS is always enabled on
+# these ports) and verify that the demo admin user (jh0003) exists.
+# Returns: the HTTP status code of the user-existence check.
+# NOTE(review): shell return values are taken mod 256, so callers must only
+# rely on the "== 200" comparison they already use -- confirm before reusing.
+function healthCheck {
+
+ echo "BE Health Check:"
+ curl -k --noproxy "*" https://${IP}:8443/sdc2/rest/healthCheck
+
+ echo ""
+ echo ""
+ echo "FE Health Check:"
+ curl -k --noproxy "*" https://${IP}:9443/sdc1/rest/healthCheck
+
+
+ echo ""
+ echo ""
+ # -o /dev/null -w '%{http_code}' discards the body and captures only the
+ # HTTP status code of the user lookup.
+ healthCheck_http_code=$(curl -k --noproxy "*" -o /dev/null -w '%{http_code}' -H "Accept: application/json" -H "Content-Type: application/json" -H "USER_ID: jh0003" https://${IP}:8443/sdc2/rest/v1/user/demo;)
+ if [[ ${healthCheck_http_code} != 200 ]]; then
+ echo "Error [${healthCheck_http_code}] while checking existence of user"
+ return ${healthCheck_http_code}
+ fi
+ echo "check user existence: OK"
+ return ${healthCheck_http_code}
+}
+#
+
+
+# Log an error if a "docker run" command exited non-zero.
+# $1 - exit status of the docker run command; $2 - container name.
+# Only logs; the script continues regardless.  Note: deliberately assigns the
+# globals 'status' and 'docker' (kept as-is; other code may observe them).
+function command_exit_status {
+ status=$1
+ docker=$2
+ if [ "${status}" != "0" ] ; then
+ echo "[ ERROR ] Docker ${docker} run command exit with status [${status}]"
+ fi
+}
+#
+
+
+#
+# Run Containers
+#
+
+
+#Cassandra
+# Start the Cassandra container (sdc-cs).  Unless running with -l/--local the
+# image is pulled first.  Cassandra data and the chef environments dir are
+# bind-mounted from ${WORKSPACE}/data; the container is then monitored until
+# its readiness probe succeeds (or the monitor times out).
+function sdc-cs {
+ DOCKER_NAME="sdc-cs"
+ echo "docker run sdc-cassandra..."
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-cassandra:${RELEASE}
+ fi
+ docker run -dit --name ${DOCKER_NAME} --env RELEASE="${RELEASE}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --env MAX_HEAP_SIZE="1536M" --env HEAP_NEWSIZE="512M" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish ${CS_PORT}:${CS_PORT} ${PREFIX}/sdc-cassandra:${RELEASE} /bin/sh
+ command_exit_status $? ${DOCKER_NAME}
+ echo "please wait while CS is starting..."
+ monitor_docker ${DOCKER_NAME}
+}
+#
+
+
+#Cassandra-init
+# One-shot container that initializes the Cassandra schema/users for SDC.
+# Runs in the foreground; its stdout/stderr are discarded and the docker logs
+# are captured to file instead.  A non-zero exit aborts the whole script.
+function sdc-cs-init {
+ DOCKER_NAME="sdc-cs-init"
+ echo "docker run sdc-cassandra-init..."
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-cassandra-init:${RELEASE}
+ fi
+ docker run --name ${DOCKER_NAME} --env RELEASE="${RELEASE}" --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/home/sdc/chef-solo/environments --volume ${WORKSPACE}/data/CS-Init:/root/chef-solo/cache ${PREFIX}/sdc-cassandra-init:${RELEASE} > /dev/null 2>&1
+ rc=$?
+ docker_logs ${DOCKER_NAME}
+ if [[ ${rc} != 0 ]]; then exit ${rc}; fi
+}
+#
+
+
+#Onboard Cassandra-init
+# One-shot container that initializes the onboarding keyspace in Cassandra.
+# Like sdc-cs-init: foreground run, logs captured, non-zero exit aborts.
+function sdc-cs-onboard-init {
+ DOCKER_NAME="sdc-cs-onboard-init"
+ echo "docker run sdc-cs-onboard-init..."
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-onboard-cassandra-init:${RELEASE}
+ fi
+ docker run --name ${DOCKER_NAME} --env RELEASE="${RELEASE}" --env CS_HOST_IP=${IP} --env CS_HOST_PORT=${CS_PORT} --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/home/sdc/chef-solo/environments --volume ${WORKSPACE}/data/CS-Init:/home/sdc/chef-solo/cache ${PREFIX}/sdc-onboard-cassandra-init:${RELEASE}
+ rc=$?
+ docker_logs ${DOCKER_NAME}
+ if [[ ${rc} != 0 ]]; then exit ${rc}; fi
+}
+#
+
+
+#Back-End
+# Start the SDC catalog backend (sdc-BE) on ports 8443/8080.  In local mode
+# the image is not pulled and the JDWP debug port is additionally published.
+# ADDITIONAL_ARGUMENTS stays unset in non-local mode and is intentionally
+# left unquoted below so it expands to nothing.
+function sdc-BE {
+ DOCKER_NAME="sdc-BE"
+ echo "docker run sdc-backend..."
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-backend:${RELEASE}
+ else
+ ADDITIONAL_ARGUMENTS=${BE_DEBUG_PORT}
+ fi
+ docker run --detach --name ${DOCKER_NAME} --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env cassandra_ssl_enabled="false" --env JAVA_OPTIONS="${BE_JAVA_OPTIONS}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/logs/BE/:${JETTY_BASE}/logs --volume ${WORKSPACE}/data/environments:${JETTY_BASE}/chef-solo/environments --publish 8443:8443 --publish 8080:8080 ${ADDITIONAL_ARGUMENTS} ${PREFIX}/sdc-backend:${RELEASE}
+ command_exit_status $? ${DOCKER_NAME}
+ echo "please wait while BE is starting..."
+ monitor_docker ${DOCKER_NAME}
+}
+#
+
+
+# Back-End-Init
+# One-shot container that seeds the catalog backend (users, consumers, etc.).
+# Foreground run, output discarded, docker logs captured; non-zero exit aborts.
+function sdc-BE-init {
+ DOCKER_NAME="sdc-BE-init"
+ echo "docker run sdc-backend-init..."
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-backend-init:${RELEASE}
+ fi
+ docker run --name ${DOCKER_NAME} --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/logs/BE/:${JETTY_BASE}/logs --volume ${WORKSPACE}/data/environments:/home/sdc/chef-solo/environments ${PREFIX}/sdc-backend-init:${RELEASE} > /dev/null 2>&1
+ rc=$?
+ docker_logs ${DOCKER_NAME}
+ if [[ ${rc} != 0 ]]; then exit ${rc}; fi
+}
+#
+
+
+# Onboard Back-End
+# Start the onboarding backend (sdc-onboard-BE) on ports 8445/8081.  Mirrors
+# sdc-BE: no pull in local mode, and the onboarding JDWP debug port is then
+# additionally published via ADDITIONAL_ARGUMENTS (unset otherwise).
+function sdc-onboard-BE {
+ DOCKER_NAME="sdc-onboard-BE"
+ echo "docker run sdc-onboard-BE ..."
+# TODO Check the dir_perms action . do we need it here ??
+# dir_perms
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-onboard-backend:${RELEASE}
+ else
+ ADDITIONAL_ARGUMENTS=${ONBOARD_DEBUG_PORT}
+ fi
+ docker run --detach --name ${DOCKER_NAME} --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env cassandra_ssl_enabled="false" --env SDC_CLUSTER_NAME="SDC-CS-${DEP_ENV}" --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env SDC_CERT_DIR="${SDC_CERT_DIR}" --env JAVA_OPTIONS="${ONBOARD_BE_JAVA_OPTIONS}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/${SDC_CERT_DIR}:${JETTY_BASE}/onap/cert --volume ${WORKSPACE}/data/logs/ONBOARD:${JETTY_BASE}/logs --volume ${WORKSPACE}/data/environments:/${JETTY_BASE}/chef-solo/environments --publish 8445:8445 --publish 8081:8081 ${ADDITIONAL_ARGUMENTS} ${PREFIX}/sdc-onboard-backend:${RELEASE}
+ command_exit_status $? ${DOCKER_NAME}
+ echo "please wait while sdc-onboard-BE is starting..."
+ monitor_docker ${DOCKER_NAME}
+}
+#
+
+
+# Front-End
+# Start the SDC frontend (sdc-FE) on ports 9443/8181.  If a
+# plugins-configuration.yaml exists under ${WORKSPACE}/data/environments it is
+# bind-mounted into the container (this is how the workflow designer plugin
+# gets wired in); otherwise the mount is skipped with an INFO message.
+# PLUGINS_CONF_VOLUME_MOUNT / ADDITIONAL_ARGUMENTS are intentionally unquoted
+# below so that, when unset, they expand to nothing.
+function sdc-FE {
+ DOCKER_NAME="sdc-FE"
+ IMAGE_NAME="${PREFIX}/sdc-frontend:${RELEASE}"
+ echo "Running container '${DOCKER_NAME}' based on '${IMAGE_NAME}' image..."
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-frontend:${RELEASE}
+ else
+ ADDITIONAL_ARGUMENTS=${FE_DEBUG_PORT}
+ fi
+
+ PLUGIN_CONFIG_FILE="${WORKSPACE}/data/environments/plugins-configuration.yaml"
+ if [[ -f ${WORKSPACE}/data/environments/plugins-configuration.yaml ]]; then
+ PLUGINS_CONF_VOLUME_MOUNT="--volume ${PLUGIN_CONFIG_FILE}:${JETTY_BASE}/config/catalog-fe/plugins-configuration.yaml"
+ else
+ echo "INFO: '${PLUGIN_CONFIG_FILE}' not provided, ignoring..."
+ fi
+
+ docker run \
+ --detach \
+ --name ${DOCKER_NAME} \
+ --env HOST_IP=${IP} \
+ --env ENVNAME="${DEP_ENV}" \
+ --env JAVA_OPTIONS="${FE_JAVA_OPTIONS}" \
+ --log-driver=json-file \
+ --log-opt max-size=100m \
+ --log-opt max-file=10 \
+ --ulimit memlock=-1:-1 \
+ --ulimit nofile=4096:100000 \
+ --volume ${WORKSPACE}/data/logs/FE/:${JETTY_BASE}/logs \
+ --volume ${WORKSPACE}/data/environments:/${JETTY_BASE}/chef-solo/environments \
+ ${LOCAL_TIME_MOUNT_CMD} \
+ ${PLUGINS_CONF_VOLUME_MOUNT} \
+ --publish 9443:9443 \
+ --publish 8181:8181 \
+ ${ADDITIONAL_ARGUMENTS} \
+ ${IMAGE_NAME}
+
+ command_exit_status $? ${DOCKER_NAME}
+ echo "Please wait while '${DOCKER_NAME}' container is starting..."
+ monitor_docker ${DOCKER_NAME}
+}
+#
+
+
+# apis-sanity
+# Run the API sanity test container, but only when -ta/-tad was given and the
+# platform health check returns 200.
+function sdc-api-tests {
+ if [[ ${RUN_API_TESTS} = true ]] ; then
+ healthCheck
+ healthCheck_http_code=$?
+ if [[ ${healthCheck_http_code} == 200 ]] ; then
+ echo "docker run sdc-api-tests..."
+ echo "Trigger sdc-api-tests docker, please wait..."
+
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-api-tests:${RELEASE}
+ fi
+
+ # BUGFIX: the trailing 'echo "please wait..."' used to be glued onto
+ # the docker run command line, so it was passed as the container
+ # command and overrode the test image's arguments (the container ran
+ # 'echo' instead of the test suite).  It is now a separate statement.
+ docker run --detach --name sdc-api-tests --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env JAVA_OPTIONS="${API_TESTS_JAVA_OPTIONS}" --env SUITE_NAME=${API_SUITE} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/logs/sdc-api-tests/target:/var/lib/tests/target --volume ${WORKSPACE}/data/logs/sdc-api-tests/ExtentReport:/var/lib/tests/ExtentReport --volume ${WORKSPACE}/data/logs/sdc-api-tests/outputCsar:/var/lib/tests/outputCsar --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --volume ${WORKSPACE}/data/${SDC_CERT_DIR}:/var/lib/tests/cert --publish 9560:9560 ${PREFIX}/sdc-api-tests:${RELEASE}
+ echo "please wait while SDC-API-TESTS is starting....."
+ monitor_docker sdc-api-tests
+ fi
+ fi
+}
+#
+
+
+# ui-sanity
+# Run the UI sanity test container, but only when -tu/-tud was given and the
+# platform health check returns 200.  The web simulator is started first
+# because the UI tests drive the application through it.
+function sdc-ui-tests {
+
+ if [[ ${RUN_UI_TESTS} = true ]] ; then
+ healthCheck
+ healthCheck_http_code=$?
+ if [[ ${healthCheck_http_code} == 200 ]]; then
+ echo "docker run sdc-ui-tests..."
+ echo "Trigger sdc-ui-tests docker, please wait..."
+
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-ui-tests:${RELEASE}
+ fi
+ RUN_SIMULATOR=true;
+ sdc-sim
+ docker run --detach --name sdc-ui-tests --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env JAVA_OPTIONS="${UI_TESTS_JAVA_OPTIONS}" --env SUITE_NAME=${UI_SUITE} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 ${LOCAL_TIME_MOUNT_CMD} --volume ${WORKSPACE}/data/logs/sdc-ui-tests/target:/var/lib/tests/target --volume ${WORKSPACE}/data/logs/sdc-ui-tests/ExtentReport:/var/lib/tests/ExtentReport --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 5901:5901 --publish 6901:6901 ${PREFIX}/sdc-ui-tests:${RELEASE}
+ echo "please wait while SDC-UI-TESTS is starting....."
+ monitor_docker sdc-ui-tests
+ fi
+ fi
+}
+#
+
+
+# SDC-Simulator
+# Start the SDC web simulator on ports 8285/8286 unless one is already
+# listening on 8285 (probe_sim).  Only runs when RUN_SIMULATOR=true
+# (-sim/--simulator, or forced by sdc-ui-tests).
+function sdc-sim {
+ if [ "${RUN_SIMULATOR}" == true ]; then
+ echo "docker run sdc-webSimulator..."
+ if [ ${LOCAL} = false ]; then
+ docker pull ${PREFIX}/sdc-simulator:${RELEASE}
+ fi
+
+ probe_sim
+ sim_stat=$?
+ # probe_sim returns 1 (FAILURE) when nothing listens on 8285 yet.
+ if [ ${sim_stat} == 1 ]; then
+ docker run \
+ --detach \
+ --name sdc-sim \
+ --env FE_URL="${FE_URL}" \
+ --env JAVA_OPTIONS="${SIM_JAVA_OPTIONS}" \
+ --env ENVNAME="${DEP_ENV}" \
+ ${LOCAL_TIME_MOUNT_CMD} \
+ --volume ${WORKSPACE}/data/logs/WS/:${JETTY_BASE}/logs \
+ --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments \
+ --publish 8285:8080 \
+ --publish 8286:8443 ${PREFIX}/sdc-simulator:${RELEASE}
+ echo "please wait while SDC-WEB-SIMULATOR is starting....."
+ monitor_docker sdc-sim
+ fi
+ fi
+}
+#
+
+
+#
+# Main
+#
+
+# Handle command line arguments (see usage() above for the full list).
+while [ $# -gt 0 ]; do
+ case $1 in
+
+ # -r | --release - The specific docker version to pull and deploy
+ -r | --release )
+ shift 1 ;
+ RELEASE=$1;
+ shift 1;;
+
+ # -e | --environment - The environment name you want to deploy
+ -e | --environment )
+ shift 1;
+ DEP_ENV=$1;
+ shift 1 ;;
+
+ # -p | --port - The port from which to connect to the docker nexus
+ -p | --port )
+ shift 1 ;
+ PORT=$1;
+ shift 1 ;;
+
+ # -l | --local - Use this for deploying your local dockers without pulling them first
+ -l | --local )
+ LOCAL=true;
+ shift 1;;
+
+ # -ta - Use this for running the APIs sanity docker after all other dockers have been deployed
+ -ta )
+ shift 1 ;
+ API_SUITE=$1;
+ RUN_API_TESTS=true;
+ shift 1 ;;
+
+ # -tu - Use this for running the UI sanity docker after all other dockers have been deployed
+ -tu )
+ shift 1 ;
+ UI_SUITE=$1;
+ RUN_UI_TESTS=true;
+ shift 1 ;;
+
+ # -tad - Use this for running the DEFAULT suite of tests in APIs sanity docker after all other dockers have been deployed
+ -tad | -t )
+ API_SUITE="onapApiSanity";
+ RUN_API_TESTS=true;
+ shift 1 ;;
+
+ # -tud - Use this for running the DEFAULT suite of tests in UI sanity docker after all other dockers have been deployed
+ -tud )
+ UI_SUITE="onapUiSanity";
+ RUN_UI_TESTS=true;
+ shift 1 ;;
+
+ # -d | --docker - The init specified docker
+ -d | --docker )
+ shift 1 ;
+ DOCKER=$1;
+ shift 1 ;;
+ # -sim | --simulator run the simulator
+ -sim | --simulator )
+ RUN_SIMULATOR=true;
+ shift 1 ;;
+ # -u | --fe_url - The frontend URL the simulator should use
+ -u | --fe_url )
+ shift 1 ;
+ FE_URL=$1;
+ shift 1 ;;
+
+ # -h | --help - Display the help message with all the available run options
+ -h | --help )
+ usage;
+ exit ${SUCCESS};;
+
+ * )
+ usage;
+ exit ${FAILURE};;
+ esac
+done
+
+
+#Prefix those with WORKSPACE so it can be set to something other than /opt
+# NOTE(review): the "|| echo ${DEP_ENV}" arm only prints; when the file is
+# missing, DEP_ENV simply keeps the value given via -e.  Confirm intended.
+[ -f ${WORKSPACE}/opt/config/env_name.txt ] && DEP_ENV=$(cat ${WORKSPACE}/opt/config/env_name.txt) || echo ${DEP_ENV}
+[ -f ${WORKSPACE}/opt/config/nexus_username.txt ] && NEXUS_USERNAME=$(cat ${WORKSPACE}/opt/config/nexus_username.txt) || NEXUS_USERNAME=release
+# NOTE(review): hardcoded fallback nexus password (public ONAP CI default);
+# consider sourcing from the environment instead of the script.
+[ -f ${WORKSPACE}/opt/config/nexus_password.txt ] && NEXUS_PASSWD=$(cat ${WORKSPACE}/opt/config/nexus_password.txt) || NEXUS_PASSWD=sfWU3DFVdBr7GVxB85mTYgAW
+[ -f ${WORKSPACE}/opt/config/nexus_docker_repo.txt ] && NEXUS_DOCKER_REPO=$(cat ${WORKSPACE}/opt/config/nexus_docker_repo.txt) || NEXUS_DOCKER_REPO=nexus3.onap.org:${PORT}
+[ -f ${WORKSPACE}/opt/config/nexus_username.txt ] && docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
+
+
+# Host IP used by the containers to reach each other / the host.
+export IP=`ip route get 8.8.8.8 | awk '/src/{ print $7 }'`
+#If OSX, then use this to get IP
+if [[ "$OSTYPE" == "darwin"* ]]; then
+ export IP=$(ipconfig getifaddr en0)
+fi
+export PREFIX=${NEXUS_DOCKER_REPO}'/onap'
+
+# Local images are built without the registry prefix.
+if [ ${LOCAL} = true ]; then
+ PREFIX='onap'
+fi
+
+echo ""
+
+# No -d/--docker argument: bring up the full stack in dependency order.
+# Otherwise run (and monitor) only the requested container function.
+if [ -z "${DOCKER}" ]; then
+ cleanup all
+ dir_perms
+ sdc-cs
+ sdc-cs-init
+ sdc-cs-onboard-init
+ sdc-onboard-BE
+ sdc-BE
+ sdc-BE-init
+ sdc-FE
+ healthCheck
+ sdc-sim
+ sdc-api-tests
+ sdc-ui-tests
+else
+ cleanup ${DOCKER}
+ dir_perms
+ ${DOCKER}
+ healthCheck
+fi
diff --git a/scripts/sdc/setup_sdc_for_sanity.sh b/scripts/sdc/setup_sdc_for_sanity.sh
index 90ffa23..1a32397 100644
--- a/scripts/sdc/setup_sdc_for_sanity.sh
+++ b/scripts/sdc/setup_sdc_for_sanity.sh
@@ -48,7 +48,7 @@
set -exo pipefail
echo "This is ${WORKSPACE}/scripts/sdc/setup_sdc_for_sanity.sh"
-
+echo "lets check what is ${1} ${2}"
ENABLE_SIMULATOR=
case "$1" in
tad|tud)
@@ -58,16 +58,17 @@
'')
# we will just setup sdc - no tests
export TEST_SUITE=""
-
- # this is mandatory
ENABLE_SIMULATOR="--simulator"
+ # this is mandatory
;;
*)
- usage
- exit 1
+ export TEST_SUITE=""
+ ENABLE_SIMULATOR="--simulator"
+ # # usage
+ # exit 1
;;
esac
-
+echo "Lets check is simulator is enabled or not ${ENABLE_SIMULATOR}"
# Clone sdc enviroment template
mkdir -p "${WORKSPACE}/data/environments/"
mkdir -p "${WORKSPACE}/data/clone/"
@@ -78,6 +79,7 @@
if [ -d "$SDC_LOCAL_GITREPO" ] ; then
rm -rf ./sdc
cp -a "$SDC_LOCAL_GITREPO" ./sdc
+ # echo "[skipping copying git repo of sdc]"
else
echo "[ERROR]: Local git repo for sdc does not exist: ${SDC_LOCAL_GITREPO}"
exit 1
diff --git a/tests/sdc-workflow-d/__init__.robot b/tests/sdc-workflow-d/__init__.robot
new file mode 100644
index 0000000..0ee6767
--- /dev/null
+++ b/tests/sdc-workflow-d/__init__.robot
@@ -0,0 +1,2 @@
+*** Settings ***
+Documentation sdc-workflow-D
diff --git a/tests/sdc-workflow-d/global_properties.robot b/tests/sdc-workflow-d/global_properties.robot
new file mode 100644
index 0000000..03de4c4
--- /dev/null
+++ b/tests/sdc-workflow-d/global_properties.robot
@@ -0,0 +1,43 @@
+*** Settings ***
+Documentation store all properties that can change or are used in multiple places here
+... format is all caps with underscores between words and prepended with GLOBAL
+... make sure you prepend them with GLOBAL so that other files can easily see it is from this file.
+
+
+*** Variables ***
+${GLOBAL_APPLICATION_ID} robot-ete
+${GLOBAL_SO_STATUS_PATH} /onap/so/infra/orchestrationRequests/v6/
+${GLOBAL_SELENIUM_BROWSER} chrome
+${GLOBAL_SELENIUM_BROWSER_CAPABILITIES} Create Dictionary
+${GLOBAL_SELENIUM_DELAY} 0
+${GLOBAL_SELENIUM_BROWSER_IMPLICIT_WAIT} 5
+${GLOBAL_SELENIUM_BROWSER_WAIT_TIMEOUT} 15
+${GLOBAL_OPENSTACK_HEAT_SERVICE_TYPE} orchestration
+${GLOBAL_OPENSTACK_CINDER_SERVICE_TYPE} volume
+${GLOBAL_OPENSTACK_NOVA_SERVICE_TYPE} compute
+${GLOBAL_OPENSTACK_NEUTRON_SERVICE_TYPE} network
+${GLOBAL_OPENSTACK_GLANCE_SERVICE_TYPE} image
+${GLOBAL_OPENSTACK_KEYSTONE_SERVICE_TYPE} identity
+${GLOBAL_OPENSTACK_STACK_DEPLOYMENT_TIMEOUT} 600s
+${GLOBAL_AAI_CLOUD_OWNER} CloudOwner
+${GLOBAL_AAI_CLOUD_OWNER_DEFINED_TYPE} OwnerType
+${GLOBAL_AAI_COMPLEX_NAME} clli1
+${GLOBAL_AAI_PHYSICAL_LOCATION_ID} clli1
+${GLOBAL_AAI_AVAILABILITY_ZONE_NAME} nova
+${GLOBAL_BUILD_NUMBER} 0
+${GLOBAL_OWNING_ENTITY_NAME} OE-Demonstration
+${GLOBAL_VID_UI_TIMEOUT_SHORT} 20s
+${GLOBAL_VID_UI_TIMEOUT_MEDIUM} 60s
+${GLOBAL_VID_UI_TIMEOUT_LONG} 120s
+${GLOBAL_AAI_INDEX_PATH} /aai/v14
+${GLOBAL_AAI_ZONE_ID} nova1
+${GLOBAL_AAI_ZONE_NAME} nova
+${GLOBAL_AAI_DESIGN_TYPE} integration
+${GLOBAL_AAI_ZONE_CONTEXT} labs
+${GLOBAL_TEMPLATE_FOLDER} robot/assets/templates
+${GLOBAL_ASSETS_FOLDER} robot/assets
+${GLOBAL_SERVICE_MAPPING_DIRECTORY} ./demo/service_mapping
+${GLOBAL_SO_HEALTH_CHECK_PATH} /manage/health
+${GLOBAL_SO_CLOUD_CONFIG_PATH} /cloudSite
+${GLOBAL_SO_CLOUD_CONFIG_TEMPLATE} so/create_cloud_config.jinja
+${GLOBAL_SO_ORCHESTRATION_REQUESTS_PATH} /onap/so/infra/orchestrationRequests/v7
diff --git a/tests/sdc-workflow-d/test1.robot b/tests/sdc-workflow-d/test1.robot
new file mode 100644
index 0000000..6217f2a
--- /dev/null
+++ b/tests/sdc-workflow-d/test1.robot
@@ -0,0 +1,112 @@
+*** Settings ***
+Documentation This is the basic test for workflow designer
+Library RequestsLibrary
+Library Collections
+Library SeleniumLibrary
+Resource global_properties.robot
+
+*** Variables ***
+${HOMEPAGE} http://localhost:8285
+${HEADLESS} True
+
+***Keywords***
+
+Open SDC GUI
+ [Documentation] Logs in to SDC GUI
+ [Arguments] ${PATH}
+ ## Setup Browever now being managed by the test case
+ ##Setup Browser
+ Go To ${HOMEPAGE}${PATH}
+ Maximize Browser Window
+
+ # Set Browser Implicit Wait ${GLOBAL_SELENIUM_BROWSER_IMPLICIT_WAIT}
+ # Log Logging in to ${SDC_FE_ENDPOINT}${PATH}
+ Wait Until Page Contains Jimmy
+ # Log Logged in to ${SDC_FE_ENDPOINT}${PATH}
+
+Setup Browser
+ [Documentation] Sets up browser based upon the value of ${GLOBAL_SELENIUM_BROWSER}
+ Run Keyword If '${GLOBAL_SELENIUM_BROWSER}' == 'firefox' Setup Browser Firefox
+ Run Keyword If '${GLOBAL_SELENIUM_BROWSER}' == 'chrome' Setup Browser Chrome
+ Log Running with ${GLOBAL_SELENIUM_BROWSER}
+
+Setup Browser Firefox
+ ${caps}= Evaluate sys.modules['selenium.webdriver'].common.desired_capabilities.DesiredCapabilities.FIREFOX sys
+ Set To Dictionary ${caps} marionette=
+ Set To Dictionary ${caps} elementScrollBehavior 1
+ ${wd}= Create WebDriver Firefox capabilities=${caps}
+ Set Global Variable ${GLOBAL_SELENIUM_BROWSER_CAPABILITIES} ${caps}
+
+
+Setup Browser Chrome
+ ${chrome options}= Evaluate sys.modules['selenium.webdriver'].ChromeOptions() sys
+ Call Method ${chrome options} add_argument no-sandbox
+ Call Method ${chrome options} add_argument ignore-certificate-errors
+ Run Keyword If ${HEADLESS}==True Call Method ${chrome options} add_argument headless
+ ${dc} Evaluate sys.modules['selenium.webdriver'].DesiredCapabilities.CHROME sys, selenium.webdriver
+ Set To Dictionary ${dc} elementScrollBehavior 1
+ Set To Dictionary ${dc} ACCEPT_SSL_CERTS True
+ Create Webdriver Chrome chrome_options=${chrome_options} desired_capabilities=${dc}
+ Set Global Variable ${GLOBAL_SELENIUM_BROWSER_CAPABILITIES} ${dc}
+
+Input Username
+ [Arguments] ${username}
+ Input Text name=userId ${username}
+
+Input Password
+ [Arguments] ${password}
+ Input Text name=password ${password}
+
+Input Name
+ [Arguments] ${workflowName}
+ Input Text id=workflowName ${workflowName}
+
+Input Description
+ [Arguments] ${description}
+ Input Text xpath=/html/body/div[2]/div/div[2]/div/form/div/div[1]/div[2]/div/textarea ${description}
+
+Input WFdescription
+    [Arguments]    ${description}
+    # BUGFIX: ${description} was accepted but never passed to Input Text,
+    # so the workflow description textarea was left empty.
+    Input Text    xpath=//*[@id="root"]/div[1]/div/div[2]/div[2]/div/div[1]/div/textarea    ${description}
+
+Submit Login Button
+ Click Element xpath=/html/body/form/input[3]
+
+Submit WorkFlow Button
+ Click Element xpath=/html/body/div/home-page/div/top-nav/nav/ul/li[5]/a
+
+Add WorkFlow
+ Click Element xpath=//*[@id="root"]/div[1]/div/div[2]/div/div[2]/div[1]
+ # Click Element xpath=//*[@id="root"]/div[1]/div/div[2]/div/div[2]/div[1]/div[1]/div/svg
+
+Create Workflow
+ Click Element xpath=/html/body/div[2]/div/div[2]/div/form/div/div[2]/button[1]
+
+Goto Frame
+ Select Frame xpath=/html/body/div/plugin-tab-view/div/plugin-frame/div/div/iframe
+
+Save WorkFlow
+ Click Element xpath=//*[@id="root"]/div[1]/div/div[1]/div[2]/div[2]/div/div/div[2]/div/div/span
+
+*** Test Cases ***
+Workflow Designer Testing
+ [Documentation] User can homepage and see the tag line
+ Setup Browser
+ Open SDC GUI /login
+ Input Username cs0008
+ Input Password 123123a
+ Submit Login Button
+ Wait Until Page Contains WORKFLOW
+ Submit WorkFlow Button
+ BuiltIn.Sleep 5s
+ Goto Frame
+ Add WorkFlow
+ BuiltIn.Sleep 5s
+ Input Name testing7
+ Input Description first test through selenium
+ Create Workflow
+ # Wait Until Page Contains General
+ # Input Description2 write some dummy description
+ # Save WorkFlow
+ # BuiltIn.Sleep 5s
+ Close Browser
\ No newline at end of file