Support for testing PMS persistency
Minor updates to support for external image repo
Issue-ID: NONRTRIC-486
Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: I95ad81726a7bec776f388c6443792fffbe554db7
diff --git a/test/common/README.md b/test/common/README.md
index 99ee015..8553519 100644
--- a/test/common/README.md
+++ b/test/common/README.md
@@ -146,8 +146,8 @@
| `--use-snapshot-image` | The script will use images from the nexus snapshot repo for the supplied apps, space separated list of app short names |
| `--use-staging-image` | The script will use images from the nexus staging repo for the supplied apps, space separated list of app short names |
| `--use-release-image` | The script will use images from the nexus release repo for the supplied apps, space separated list of app short names |
-| `--image-repo` | Url to image repo. Only required in when running in multi-node kube cluster, otherwise optional. All used images will be re-tagged and pushed to this repo
-
+| `--image-repo` | Url to an optional image repo. Only locally built images will be re-tagged and pushed to this repo |
+| `--cluster-timeout` | Optional timeout, in seconds, for clusters where it takes time to obtain the external ip/host-name of a service |
| `help` | Print this info along with the test script description and the list of app short names supported |
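+
+For example, a hypothetical invocation combining the two options above with the mandatory arguments could look like this (script name, env file path, repo address and timeout value are examples only):
+
+```bash
+./<test-script>.sh remote kube --env-file ../common/test_env-oran-dawn.sh --image-repo localhost:5000 --cluster-timeout 120
+```
+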
## Function: setup_testenvironment
@@ -1004,8 +1004,14 @@
|--|
| None |
-## Function: restart_ecs ##
-Restart the ECS container.
+## Function: stop_ecs ##
+Stop the ECS container.
+| arg list |
+|--|
+| None |
+
+## Function: start_stopped_ecs ##
+Start a previously stopped ECS.
| arg list |
|--|
| None |
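+
+Example of a (hypothetical) restart sequence in a test script using the two functions above:
+
+```bash
+stop_ecs
+start_stopped_ecs
+```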
diff --git a/test/common/agent_api_functions.sh b/test/common/agent_api_functions.sh
index 0c2e48a..cb48d78 100644
--- a/test/common/agent_api_functions.sh
+++ b/test/common/agent_api_functions.sh
@@ -96,6 +96,9 @@
# Make curl retries towards the agent for http response codes set in this env var, space separated list of codes
AGENT_RETRY_CODES=""
+#Save first worker node the pod is started on
+__PA_WORKER_NODE=""
+
###########################
### Policy Agents functions
###########################
@@ -203,6 +206,13 @@
export POLICY_AGENT_CONFIG_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-config"
export POLICY_AGENT_DATA_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-data"
export POLICY_AGENT_PKG_NAME
+
+ export POLICY_AGENT_DATA_PV_NAME=$POLICY_AGENT_APP_NAME"-pv"
+ export POLICY_AGENT_DATA_PVC_NAME=$POLICY_AGENT_APP_NAME"-pvc"
+    ##Create a unique path for the pv each time to prevent a previous volume from being reused
+ export POLICY_AGENT_PV_PATH="padata-"$(date +%s)
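+    # (e.g. "padata-1624000000" - the epoch seconds make the hostPath unique per test run; example value only)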
+ export POLICY_AGENT_CONTAINER_MNT_DIR
+
if [ $1 == "PROXY" ]; then
AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT #Set if proxy is started
AGENT_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
@@ -237,6 +247,16 @@
output_yaml=$PWD/tmp/pa_cfd.yaml
__kube_create_configmap $POLICY_AGENT_DATA_CONFIGMAP_NAME $KUBE_NONRTRIC_NAMESPACE autotest PA $data_json $output_yaml
+ ## Create pv
+ input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"pv.yaml
+ output_yaml=$PWD/tmp/pa_pv.yaml
+ __kube_create_instance pv $POLICY_AGENT_APP_NAME $input_yaml $output_yaml
+
+ ## Create pvc
+ input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"pvc.yaml
+ output_yaml=$PWD/tmp/pa_pvc.yaml
+ __kube_create_instance pvc $POLICY_AGENT_APP_NAME $input_yaml $output_yaml
+
# Create service
input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"svc.yaml
output_yaml=$PWD/tmp/pa_svc.yaml
@@ -249,6 +269,12 @@
fi
+    # Keep the initial worker node in case the pod needs to be "restarted" - it must be scheduled to the same node due to a volume mounted on the host
+ __PA_WORKER_NODE=$(kubectl get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+ if [ -z "$__PA_WORKER_NODE" ]; then
+        echo -e $YELLOW" Cannot find the worker node for the $POLICY_AGENT_APP_NAME pod, persistency may not work"$EYELLOW
+ fi
+
echo " Retrieving host and ports for service..."
PA_HOST_NAME=$(__kube_get_service_host $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
POLICY_AGENT_EXTERNAL_PORT=$(__kube_get_service_port $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
@@ -274,6 +300,25 @@
exit
fi
+ curdir=$PWD
+ cd $SIM_GROUP
+ cd policy_agent
+ cd $POLICY_AGENT_HOST_MNT_DIR
+ if [ -d db ]; then
+        if [ "$(ls -A db)" ]; then
+ echo -e $BOLD" Cleaning files in mounted dir: $PWD/db"$EBOLD
+ rm -rf db/* &> /dev/null
+ if [ $? -ne 0 ]; then
+ echo -e $RED" Cannot remove database files in: $PWD"$ERED
+ exit 1
+ fi
+ fi
+ else
+        echo " No files in mounted dir or dir does not exist"
+ fi
+ cd $curdir
+
#Export all vars needed for docker-compose
export POLICY_AGENT_APP_NAME
export POLICY_AGENT_APP_NAME_ALIAS
@@ -291,6 +336,7 @@
export POLICY_AGENT_CONFIG_FILE
export POLICY_AGENT_PKG_NAME
export POLICY_AGENT_DISPLAY_NAME
+ export POLICY_AGENT_CONTAINER_MNT_DIR
if [ $1 == "PROXY" ]; then
AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT #Set if proxy is started
@@ -320,6 +366,79 @@
return 0
}
+# Stop the policy agent
+# args: -
+# (Function for test scripts)
+stop_policy_agent() {
+ echo -e $BOLD"Stopping $POLICY_AGENT_DISPLAY_NAME"$EBOLD
+
+ if [ $RUNMODE == "KUBE" ]; then
+ __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest PA
+        echo " Deleting the replica set - a new one will be started when the app is started"
+ tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA")
+ if [ $? -ne 0 ]; then
+            echo -e $RED" Could not delete replica set "$ERED
+ ((RES_CONF_FAIL++))
+ return 1
+ fi
+ else
+ docker stop $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr
+ if [ $? -ne 0 ]; then
+ __print_err "Could not stop $POLICY_AGENT_APP_NAME" $@
+ cat ./tmp/.dockererr
+ ((RES_CONF_FAIL++))
+ return 1
+ fi
+ fi
+ echo -e $BOLD$GREEN"Stopped"$EGREEN$EBOLD
+ echo ""
+ return 0
+}
+
+# Start a previously stopped policy agent
+# args: -
+# (Function for test scripts)
+start_stopped_policy_agent() {
+ echo -e $BOLD"Starting (the previously stopped) $POLICY_AGENT_DISPLAY_NAME"$EBOLD
+
+ if [ $RUNMODE == "KUBE" ]; then
+
+ # Tie the PMS to the same worker node it was initially started on
+ # A PVC of type hostPath is mounted to PMS, for persistent storage, so the PMS must always be on the node which mounted the volume
+ if [ -z "$__PA_WORKER_NODE" ]; then
+            echo -e $RED" No initial worker node found for pod "$ERED
+ ((RES_CONF_FAIL++))
+ return 1
+ else
+            echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__PA_WORKER_NODE to deployment for $POLICY_AGENT_APP_NAME. Pod will always run on this worker node: $__PA_WORKER_NODE"$EBOLD
+            echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$EBOLD
+ tmp=$(kubectl patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}')
+ if [ $? -ne 0 ]; then
+ echo -e $YELLOW" Cannot set nodeSelector to deployment for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
+ fi
+ __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+ fi
+
+ else
+ docker start $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr
+ if [ $? -ne 0 ]; then
+ __print_err "Could not start (the stopped) $POLICY_AGENT_APP_NAME" $@
+ cat ./tmp/.dockererr
+ ((RES_CONF_FAIL++))
+ return 1
+ fi
+ fi
+ __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+ echo ""
+ return 0
+}
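+
+# Example (hypothetical) test-script sequence for a persistency test using the two functions above:
+#   stop_policy_agent
+#   start_stopped_policy_agent
+#   <then verify that policies/services created before the stop are still present>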
+
# Load the the appl config for the agent into a config map
agent_load_config() {
echo -e $BOLD"Agent - load config from "$EBOLD$1
diff --git a/test/common/cr_api_functions.sh b/test/common/cr_api_functions.sh
index 437b207..134f50c 100644
--- a/test/common/cr_api_functions.sh
+++ b/test/common/cr_api_functions.sh
@@ -35,7 +35,7 @@
# <pull-policy-original> Shall be used for images that does not allow overriding
# Both var may contain: 'remote', 'remote-remove' or 'local'
__CR_imagepull() {
- echo -e $RED" Image for app CR shall never be pulled from remove repo"$ERED
+ echo -e $RED" Image for app CR shall never be pulled from remote repo"$ERED
}
# Build image (only for simulator or interfaces stubs owned by the test environment)
diff --git a/test/common/delete_policies_process.py b/test/common/delete_policies_process.py
index 4ce8bc4..ec69e13 100644
--- a/test/common/delete_policies_process.py
+++ b/test/common/delete_policies_process.py
@@ -84,7 +84,7 @@
retry_cnt -= 1
total_retry_count += 1
else:
- print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code))
+ print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code)+str(resp.raw))
sys.exit()
else:
retry_cnt=-1
diff --git a/test/common/ecs_api_functions.sh b/test/common/ecs_api_functions.sh
index 525ac8b..ba6af92 100644
--- a/test/common/ecs_api_functions.sh
+++ b/test/common/ecs_api_functions.sh
@@ -96,6 +96,9 @@
# Make curl retries towards ECS for http response codes set in this env var, space separated list of codes
ECS_RETRY_CODES=""
+#Save first worker node the pod is started on
+__ECS_WORKER_NODE=""
+
###########################
### ECS functions
###########################
@@ -205,6 +208,7 @@
export ECS_CONTAINER_MNT_DIR
export ECS_DATA_PV_NAME=$ECS_APP_NAME"-pv"
+ export ECS_DATA_PVC_NAME=$ECS_APP_NAME"-pvc"
#Create a unique path for the pv each time to prevent a previous volume to be reused
export ECS_PV_PATH="ecsdata-"$(date +%s)
@@ -251,6 +255,15 @@
__kube_create_instance app $ECS_APP_NAME $input_yaml $output_yaml
fi
+ # Tie the ECS to a worker node so that ECS will always be scheduled to the same worker node if the ECS pod is restarted
+ # A PVC of type hostPath is mounted to ECS, for persistent storage, so the ECS must always be on the node which mounted the volume
+
+    # Keep the initial worker node in case the pod needs to be "restarted" - it must be scheduled to the same node due to a volume mounted on the host
+ __ECS_WORKER_NODE=$(kubectl get pod -l "autotest=ECS" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+ if [ -z "$__ECS_WORKER_NODE" ]; then
+        echo -e $YELLOW" Cannot find the worker node for the $ECS_APP_NAME pod, persistency may not work"$EYELLOW
+ fi
+
echo " Retrieving host and ports for service..."
ECS_HOST_NAME=$(__kube_get_service_host $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
ECS_EXTERNAL_PORT=$(__kube_get_service_port $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
@@ -337,20 +350,73 @@
return 0
}
-# Restart ECS
+# Stop the ecs
# args: -
# (Function for test scripts)
-restart_ecs() {
- echo -e $BOLD"Re-starting ECS"$EBOLD
- docker restart $ECS_APP_NAME &> ./tmp/.dockererr
+stop_ecs() {
+ echo -e $BOLD"Stopping $ECS_DISPLAY_NAME"$EBOLD
+
+ if [ $RUNMODE == "KUBE" ]; then
+ __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest ECS
+        echo " Deleting the replica set - a new one will be started when the app is started"
+ tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ECS")
+ if [ $? -ne 0 ]; then
+            echo -e $RED" Could not delete replica set "$ERED
+ ((RES_CONF_FAIL++))
+ return 1
+ fi
+ else
+ docker stop $ECS_APP_NAME &> ./tmp/.dockererr
+ if [ $? -ne 0 ]; then
+ __print_err "Could not stop $ECS_APP_NAME" $@
+ cat ./tmp/.dockererr
+ ((RES_CONF_FAIL++))
+ return 1
+ fi
+ fi
+ echo -e $BOLD$GREEN"Stopped"$EGREEN$EBOLD
+ echo ""
+ return 0
+}
+
+# Start a previously stopped ecs
+# args: -
+# (Function for test scripts)
+start_stopped_ecs() {
+ echo -e $BOLD"Starting (the previously stopped) $ECS_DISPLAY_NAME"$EBOLD
+
+ if [ $RUNMODE == "KUBE" ]; then
+
+        # Tie the ECS to the same worker node it was initially started on
+        # A PVC of type hostPath is mounted to ECS, for persistent storage, so the ECS must always be on the node which mounted the volume
+ if [ -z "$__ECS_WORKER_NODE" ]; then
+            echo -e $RED" No initial worker node found for pod "$ERED
+ ((RES_CONF_FAIL++))
+ return 1
+ else
+            echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__ECS_WORKER_NODE to deployment for $ECS_APP_NAME. Pod will always run on this worker node: $__ECS_WORKER_NODE"$EBOLD
+            echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$EBOLD
+ tmp=$(kubectl patch deployment $ECS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ECS_WORKER_NODE'"}}}}}')
+ if [ $? -ne 0 ]; then
+ echo -e $YELLOW" Cannot set nodeSelector to deployment for $ECS_APP_NAME, persistency may not work"$EYELLOW
+ fi
+ __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+ fi
+
+ else
+ docker start $ECS_APP_NAME &> ./tmp/.dockererr
+ if [ $? -ne 0 ]; then
+ __print_err "Could not start (the stopped) $ECS_APP_NAME" $@
+ cat ./tmp/.dockererr
+ ((RES_CONF_FAIL++))
+ return 1
+ fi
+ fi
+ __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
if [ $? -ne 0 ]; then
- __print_err "Could not restart $ECS_APP_NAME" $@
- cat ./tmp/.dockererr
- ((RES_CONF_FAIL++))
return 1
fi
-
- __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
echo ""
return 0
}
diff --git a/test/common/kube_proxy_api_functions.sh b/test/common/kube_proxy_api_functions.sh
index 374f3ec..5cac74c 100644
--- a/test/common/kube_proxy_api_functions.sh
+++ b/test/common/kube_proxy_api_functions.sh
@@ -83,14 +83,6 @@
#######################################################
-
-## Access to Kube Http Proxy
-# Host name may be changed if app started by kube
-# Direct access from script
-#BMXX KUBE_PROXY_HTTPX="http"
-#BMXX KUBE_PROXY_HOST_NAME=$LOCALHOST_NAME
-#BMXX KUBE_PROXY_PATH=$KUBE_PROXY_HTTPX"://"$KUBE_PROXY_HOST_NAME":"$KUBE_PROXY_WEB_EXTERNAL_PORT
-
#########################
### Http Proxy functions
#########################
@@ -157,27 +149,56 @@
echo " Retrieving host and ports for service..."
CLUSTER_KUBE_PROXY="http"
- CLUSTER_KUBE_PROXY_HOST=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
- if [[ $CLUSTER_KUBE_PROXY_HOST == *"kubernetes"* ]]; then
- echo -e $YELLOW" The cluster host is: $CLUSTER_KUBE_PROXY_HOST. The proxy (mitmproxy) used by test script requires an ip so the ip is assumed and set to 127.0.0.1"
+
+ #Finding host of the proxy
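+    # Fallback order for finding the proxy host (a summary of the logic below):
+    #   1. svc loadBalancer ingress hostname ('localhost' is mapped to 127.0.0.1 since the proxy (mitmproxy) needs an ip)
+    #   2. svc loadBalancer ingress ip
+    #   3. the cluster api-server host combined with the service nodeports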
+ echo " Trying to find svc hostname..."
+ CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE -o jsonpath={.status.loadBalancer.ingress[0].hostname}")
+
+
+ if [ "$CLUSTER_KUBE_PROXY_HOST" == "localhost" ]; then
+ #Local host found
+ echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME host is: $CLUSTER_KUBE_PROXY_HOST. The proxy (mitmproxy) used by test script requires an ip so the ip is assumed and set to 127.0.0.1"$EYELLOW
CLUSTER_KUBE_PROXY_HOST="127.0.0.1"
+ else
+ if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+ #Host of proxy not found, trying to find the ip....
+ echo " Trying to find svc ip..."
+ CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE -o jsonpath={.status.loadBalancer.ingress[0].ip}")
+ if [ ! -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+ #Host ip found
+ echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+ fi
+ else
+ #Host or ip of proxy found
+ echo -e $YELLOW" The test environment host/ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+ fi
fi
- CLUSTER_KUBE_PROXY_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http") # port for proxy access
- KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web") # web port, only for alive test
+ if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+ #Host/ip of proxy not found, try to use the cluster and the nodeports of the proxy
+ CLUSTER_KUBE_PROXY_HOST=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+ echo -e $YELLOW" The test environment cluster ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+ CLUSTER_KUBE_PROXY_PORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http") # port for proxy access
+ KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web") # web port, only for alive test
+ echo " Cluster ip/host, cluster http nodeport, cluster web nodeport: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_PORT $KUBE_PROXY_WEB_NODEPORT"
+ else
+ #Find the service ports of the proxy
+ CLUSTER_KUBE_PROXY_PORT=$(__kube_get_service_port $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http") # port for proxy access
+ KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_port $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web") # web port, only for alive test
+ echo " Proxy ip/host, proxy http port, proxy web port: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_PORT $KUBE_PROXY_WEB_NODEPORT"
+ fi
KUBE_PROXY_WEB_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$KUBE_PROXY_WEB_NODEPORT
- echo " Cluster ip/host, cluster http nodeport cluster web nodeport: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_NODEPORT $KUBE_PROXY_WEB_NODEPORT"
-
export KUBE_PROXY_PATH= # Make sure proxy is empty when checking the proxy itself
__check_service_start $KUBE_PROXY_APP_NAME $KUBE_PROXY_WEB_PATH$KUBE_PROXY_ALIVE_URL
# Set proxy for all subsequent calls for all services etc
- export KUBE_PROXY_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$CLUSTER_KUBE_PROXY_NODEPORT
+ export KUBE_PROXY_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$CLUSTER_KUBE_PROXY_PORT
else
echo $YELLOW" Kube http proxy not needed in docker test. App not started"
fi
echo ""
+
}
diff --git a/test/common/mr_api_functions.sh b/test/common/mr_api_functions.sh
index 1c2f155..25b5172 100644
--- a/test/common/mr_api_functions.sh
+++ b/test/common/mr_api_functions.sh
@@ -44,7 +44,7 @@
# <pull-policy-original> Shall be used for images that does not allow overriding
# Both var may contain: 'remote', 'remote-remove' or 'local'
__MR_imagepull() {
- echo -e $RED"Image for app CR shall never be pulled from remove repo"$ERED
+ echo -e $RED"Image for app MR shall never be pulled from remote repo"$ERED
}
# Pull image from remote repo or use locally built image
diff --git a/test/common/prodstub_api_functions.sh b/test/common/prodstub_api_functions.sh
index ae3f193..744e357 100644
--- a/test/common/prodstub_api_functions.sh
+++ b/test/common/prodstub_api_functions.sh
@@ -35,7 +35,7 @@
# <pull-policy-original> Shall be used for images that does not allow overriding
# Both var may contain: 'remote', 'remote-remove' or 'local'
__PRODSTUB_imagepull() {
- echo -e $RED"Image for app PRODSTUB shall never be pulled from remove repo"$ERED
+ echo -e $RED"Image for app PRODSTUB shall never be pulled from remote repo"$ERED
}
# Build image (only for simulator or interfaces stubs owned by the test environment)
diff --git a/test/common/ricsimulator_api_functions.sh b/test/common/ricsimulator_api_functions.sh
index bf30310..785ff9a 100644
--- a/test/common/ricsimulator_api_functions.sh
+++ b/test/common/ricsimulator_api_functions.sh
@@ -314,7 +314,7 @@
__find_sim_host() {
if [ $RUNMODE == "KUBE" ]; then
ricname=$(echo "$1" | tr '_' '-')
- for timeout in {1..60}; do
+    for timeout in {1..500}; do # long wait time needed when starting a large number of sims
host=$(kubectl get pod $ricname -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.status.podIP}' 2> /dev/null)
if [ ! -z "$host" ]; then
echo $RIC_SIM_HTTPX"://"$host":"$RIC_SIM_PORT
diff --git a/test/common/test_env-oran-dawn.sh b/test/common/test_env-oran-dawn.sh
index 1890b71..7ecd434 100755
--- a/test/common/test_env-oran-dawn.sh
+++ b/test/common/test_env-oran-dawn.sh
@@ -226,6 +226,7 @@
POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
POLICY_AGENT_CONFIG_FILE="application.yaml" # Container config file name
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
ECS_APP_NAME="enrichmentservice" # Name for ECS container
ECS_DISPLAY_NAME="Enrichment Coordinator Service" # Display name for ECS container
diff --git a/test/common/testcase_common.sh b/test/common/testcase_common.sh
index b232577..1f6d135 100755
--- a/test/common/testcase_common.sh
+++ b/test/common/testcase_common.sh
@@ -27,6 +27,7 @@
echo "Args: remote|remote-remove docker|kube --env-file <environment-filename> [release] [auto-clean] [--stop-at-error] "
echo " [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+] [--use-snapshot-image <app-nam>+]"
echo " [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
+	echo "      [--cluster-timeout <timeout-in-seconds>]"
}
if [ $# -eq 1 ] && [ "$1" == "help" ]; then
@@ -51,7 +52,8 @@
echo "--use-snapshot-image - The script will use images from the nexus snapshot repo for the supplied apps, space separated list of app short names"
echo "--use-staging-image - The script will use images from the nexus staging repo for the supplied apps, space separated list of app short names"
echo "--use-release-image - The script will use images from the nexus release repo for the supplied apps, space separated list of app short names"
- echo "--image-repo - Url to image repo. Only required in when running in multi-node kube cluster, otherwise optional. All used images will be re-tagged and pushed to this repo"
+	echo "--image-repo - Url to an optional image repo. Only locally built images will be re-tagged and pushed to this repo"
+	echo "--cluster-timeout - Optional timeout, in seconds, for clusters where it takes time to obtain the external ip/host-name of a service"
echo ""
echo "List of app short names supported: "$APP_SHORT_NAMES
exit 0
@@ -303,7 +305,7 @@
# If this is set, all used images will be re-tagged and pushed to this repo before any
IMAGE_REPO_ADR=""
-
+CLUSTER_TIME_OUT=0
echo "-------------------------------------------------------------------------------------------------"
echo "----------------------------------- Test case: "$ATC
@@ -523,6 +525,32 @@
fi
fi
fi
+ if [ $paramerror -eq 0 ]; then
+ if [ "$1" == "--cluster-timeout" ]; then
+ shift;
+ CLUSTER_TIME_OUT=$1
+ if [ -z "$1" ]; then
+ paramerror=1
+ if [ -z "$paramerror_str" ]; then
+ paramerror_str="No timeout value found for : '--cluster-timeout'"
+ fi
+ else
+ #Check if positive int
+ case ${CLUSTER_TIME_OUT#[+]} in
+ *[!0-9]* | '')
+ paramerror=1
+ if [ -z "$paramerror_str" ]; then
+ paramerror_str="Value for '--cluster-timeout' not an int : "$CLUSTER_TIME_OUT
+ fi
+ ;;
+ * ) ;; # Ok
+ esac
+ echo "Option set - Cluster timeout: "$1
+ shift;
+ foundparm=0
+ fi
+ fi
+ fi
done
echo ""
@@ -707,7 +735,7 @@
echo -e "$tmp" >> $image_list_file
#Export the env var
export "${2}"=$image":"$tag #Note, this var may be set to the value of the target value below in __check_and_pull_image
- if [ ! -z "$IMAGE_REPO_ADR" ]; then
+	if [ ! -z "$IMAGE_REPO_ADR" ] && [ "$5" == "LOCAL" ]; then    # Only push local images if a repo is given
export "${2}_SOURCE"=$image":"$tag #Var to keep the actual source image
export "${2}_TARGET"=$IMAGE_REPO_ADR"/"$optional_image_repo_target":"$tag #Create image + tag for optional image repo - pushed later if needed
else
@@ -1047,40 +1075,44 @@
# The following sequence pull the configured images
- echo -e $BOLD"Pulling configured images, if needed"$EBOLD
- for imagename in $APP_SHORT_NAMES; do
- __check_included_image $imagename
- incl=$?
- __check_project_image $imagename
- proj=$?
- if [ $incl -eq 0 ]; then
- if [ $proj -eq 0 ]; then
- START_ARG_MOD=$START_ARG
- __check_image_local_override $imagename
- if [ $? -eq 1 ]; then
- START_ARG_MOD="local"
+ echo -e $BOLD"Pulling configured images, if needed"$EBOLD
+ if [ ! -z "$IMAGE_REPO_ADR" ]; then
+ echo -e $YELLOW" Excluding all remote image check/pull when running with image repo: $IMAGE_REPO_ADR"$EYELLOW
+ else
+ for imagename in $APP_SHORT_NAMES; do
+ __check_included_image $imagename
+ incl=$?
+ __check_project_image $imagename
+ proj=$?
+ if [ $incl -eq 0 ]; then
+ if [ $proj -eq 0 ]; then
+ START_ARG_MOD=$START_ARG
+ __check_image_local_override $imagename
+ if [ $? -eq 1 ]; then
+ START_ARG_MOD="local"
+ fi
+ else
+ START_ARG_MOD=$START_ARG
+ fi
+ __check_image_local_build $imagename
+ #No pull of images built locally
+ if [ $? -ne 0 ]; then
+ # A function name is created from the app short name
+ # for example app short name 'HTTPPROXY' -> produce the function
+				# name __HTTPPROXY_imagepull
+ # This function is called and is expected to exist in the imported
+ # file for the httpproxy test functions
+ # The resulting function impl will call '__check_and_pull_image' function
+ # with appropriate parameters
+ function_pointer="__"$imagename"_imagepull"
+ $function_pointer $START_ARG_MOD $START_ARG
fi
else
- START_ARG_MOD=$START_ARG
+ echo -e $YELLOW" Excluding $imagename image from image check/pull"$EYELLOW
fi
- __check_image_local_build $imagename
- #No pull of images built locally
- if [ $? -ne 0 ]; then
- # A function name is created from the app short name
- # for example app short name 'HTTPPROXY' -> produce the function
- # name __HTTPPROXY_imagesetup
- # This function is called and is expected to exist in the imported
- # file for the httpproxy test functions
- # The resulting function impl will call '__check_and_pull_image' function
- # with appropriate parameters
- function_pointer="__"$imagename"_imagepull"
- $function_pointer $START_ARG_MOD $START_ARG
- fi
- else
- echo -e $YELLOW" Excluding $imagename image from image check/pull"$EYELLOW
- fi
- done
+ done
+ fi
#Errors in image setting - exit
if [ $IMAGE_ERR -ne 0 ]; then
@@ -1126,8 +1158,8 @@
echo ""
- # Create a table of the images used in the script
- echo -e $BOLD"Local docker registry images used in the this test script"$EBOLD
+ # Create a table of the images used in the script - from local repo
+ echo -e $BOLD"Local docker registry images used in this test script"$EBOLD
docker_tmp_file=./tmp/.docker-images-table
format_string="{{.Repository}}\\t{{.Tag}}\\t{{.CreatedSince}}\\t{{.Size}}\\t{{.CreatedAt}}"
@@ -1136,40 +1168,85 @@
for imagename in $APP_SHORT_NAMES; do
__check_included_image $imagename
if [ $? -eq 0 ]; then
- # A function name is created from the app short name
- # for example app short name 'MR' -> produce the function
- # name __MR_imagebuild
- # This function is called and is expected to exist in the imported
- # file for the mr test functions
- # The resulting function impl shall build the imagee
- function_pointer="__"$imagename"_image_data"
- $function_pointer "$format_string" $docker_tmp_file
+ # Only print image data if image repo is null, or if image repo is set and image is local
+ print_image_data=0
+ if [ -z "$IMAGE_REPO_ADR" ]; then
+ print_image_data=1
+ else
+ __check_image_local_build $imagename
+ if [ $? -eq 0 ]; then
+ print_image_data=1
+ fi
+ fi
+ if [ $print_image_data -eq 1 ]; then
+ # A function name is created from the app short name
+ # for example app short name 'MR' -> produce the function
+				# name __MR_image_data
+ # This function is called and is expected to exist in the imported
+ # file for the mr test functions
+				# The resulting function impl shall print the image data
+ function_pointer="__"$imagename"_image_data"
+ $function_pointer "$format_string" $docker_tmp_file
+ fi
fi
done
-
column -t -s $'\t' $docker_tmp_file | indent1
echo ""
+
+ if [ ! -z "$IMAGE_REPO_ADR" ]; then
+
+ # Create a table of the images used in the script - from remote repo
+ echo -e $BOLD"Remote repo images used in this test script"$EBOLD
+		echo -e $YELLOW"-- Note: These images will be pulled when the containers start. Images not managed by the test engine --"$EYELLOW
+
+ docker_tmp_file=./tmp/.docker-images-table
+ format_string="{{.Repository}}\\t{{.Tag}}"
+ echo -e "Application\tRepository\tTag" > $docker_tmp_file
+
+ for imagename in $APP_SHORT_NAMES; do
+ __check_included_image $imagename
+ if [ $? -eq 0 ]; then
+			# Only print image data for images not built locally, i.e. images pulled from the remote repo
+ __check_image_local_build $imagename
+ if [ $? -ne 0 ]; then
+ # A function name is created from the app short name
+ # for example app short name 'MR' -> produce the function
+				# name __MR_image_data
+ # This function is called and is expected to exist in the imported
+ # file for the mr test functions
+				# The resulting function impl shall print the image data
+ function_pointer="__"$imagename"_image_data"
+ $function_pointer "$format_string" $docker_tmp_file
+ fi
+ fi
+ done
+
+ column -t -s $'\t' $docker_tmp_file | indent1
+
+ echo ""
+ fi
+
if [ $RUNMODE == "KUBE" ]; then
echo "================================================================================="
echo "================================================================================="
- CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
- if [[ $CLUSTER_IP != *"kubernetes"* ]]; then
- echo -e $YELLOW" The cluster ip is: $CLUSTER_IP. This kubernetes is likely a multi-node cluster."$EYELLOW
- echo -e $YELLOW" The image pull policy is set to 'Never'."$EYELLOW
+ if [ -z "$IMAGE_REPO_ADR" ]; then
+ echo -e $YELLOW" The image pull policy is set to 'Never' - assuming a local image repo is available for all images"$EYELLOW
+ echo -e " This setting only works on single node clusters on the local machine"
+ echo -e " It does not work with multi-node clusters or remote clusters. "
export KUBE_IMAGE_PULL_POLICY="Never"
- if [ -z "$IMAGE_REPO_ADR" ]; then
- echo -e $RED" The flag --image-repo need to be provided to the cmd with the path to a custom image repo'."$ERED
- exit 1
- fi
else
- echo -e $YELLOW" The cluster ip is: $CLUSTER_IP. This kubernetes is likely a single-node cluster on a local machine."$EYELLOW
- echo -e $YELLOW" The image pull policy is set to 'Never'."$EYELLOW
- export KUBE_IMAGE_PULL_POLICY="Never"
+ echo -e $YELLOW" The image pull policy is set to 'Always'"$EYELLOW
+		echo -e "   This setting works on local clusters, multi-node clusters and remote clusters."
+		echo -e "   Only locally built images are managed. Remote images are always pulled from remote repos"
+		echo -e "   Pulling remote snapshot or staging images may in some cases result in pulling newer image versions outside the control of the test engine"
+ export KUBE_IMAGE_PULL_POLICY="Always"
fi
+ CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+ echo -e $YELLOW" The cluster hostname/ip is: $CLUSTER_IP"$EYELLOW
echo "================================================================================="
echo "================================================================================="
@@ -1292,7 +1369,7 @@
start_timer() {
echo -e $BOLD"INFO(${BASH_LINENO[0]}): "${FUNCNAME[0]}"," $@ $EBOLD
TC_TIMER=$SECONDS
- echo " Timer started"
+ echo " Timer started: $(date)"
}
# Print the value of the time (in seconds)
@@ -1815,6 +1892,26 @@
return 0
}
+# This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
+# The function retries up to the timeout given in the cmd flag '--cluster-timeout'
+# args: <full kubectl cmd with parameters>
+# (Not for test scripts)
+__kube_cmd_with_timeout() {
+ TS_TMP=$(($SECONDS+$CLUSTER_TIME_OUT))
+
+ while true; do
+ kube_cmd_result=$($@)
+ if [ $? -ne 0 ]; then
+ kube_cmd_result=""
+ fi
+ if [ $SECONDS -ge $TS_TMP ] || [ ! -z "$kube_cmd_result" ] ; then
+ echo $kube_cmd_result
+ return 0
+ fi
+ sleep 1
+ done
+}
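+
+# Example usage (taken from kube_proxy_api_functions.sh in this change - retrieves the svc hostname with retries):
+#   host=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE -o jsonpath={.status.loadBalancer.ingress[0].hostname}")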
+
# This function scales or deletes all resources for app selected by the testcase.
# args: -
# (Not for test scripts)
@@ -2049,7 +2146,7 @@
TSTART=$SECONDS
loop_ctr=0
while (( $TSTART+600 > $SECONDS )); do
- result="$(__do_curl $url)"
+ result="$(__do_curl -m 10 $url)"
if [ $? -eq 0 ]; then
if [ ${#result} -gt 15 ]; then
#If response is too long, truncate
@@ -2197,7 +2294,13 @@
curlString="curl -skw %{http_code} $proxyflag $@"
echo " CMD: $curlString" >> $HTTPLOG
res=$($curlString)
+ retcode=$?
echo " RESP: $res" >> $HTTPLOG
+ echo " RETCODE: $retcode" >> $HTTPLOG
+ if [ $retcode -ne 0 ]; then
+ echo "<no-response-from-server>"
+ return 1
+ fi
http_code="${res:${#res}-3}"
if [ ${#res} -eq 3 ]; then
if [ $http_code -lt 200 ] || [ $http_code -gt 299 ]; then