Merge "Add integration override yaml for helm"
diff --git a/test/csit/plans/appc/healthcheck/bundle_query.sh b/test/csit/plans/appc/healthcheck/bundle_query.sh
index a85bf31..3801d0a 100755
--- a/test/csit/plans/appc/healthcheck/bundle_query.sh
+++ b/test/csit/plans/appc/healthcheck/bundle_query.sh
@@ -18,10 +18,10 @@
echo $SCRIPTS
-num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1)
-#num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure | wc -l)
-num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure | wc -l)
-failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure)
+num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client bundle:list | tail -1 | cut -d\| -f1)
+#num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client bundle:list | grep Failure | wc -l)
+num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client bundle:list | grep Failure | wc -l)
+failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client bundle:list | grep Failure)
echo "There are $num_failed_bundles failed bundles out of $num_bundles installed bundles."
diff --git a/test/csit/plans/appc/healthcheck/health_check.sh b/test/csit/plans/appc/healthcheck/health_check.sh
index 63e0b17..e4cfae8 100755
--- a/test/csit/plans/appc/healthcheck/health_check.sh
+++ b/test/csit/plans/appc/healthcheck/health_check.sh
@@ -17,7 +17,7 @@
SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo $SCRIPTS
-response=$(curl --write-out '%{http_code}' --silent --output /dev/null -H "Authorization: Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ==" -X POST -H "X-FromAppId: csit-appc" -H "X-TransactionId: csit-appc" -H "Accept: application/json" -H "Content-Type: application/json" http://localhost:8282/restconf/operations/SLI-API:healthcheck )
+response=$(curl --write-out '%{http_code}' --silent --output /dev/null -H "Authorization: Basic YWRtaW46YWRtaW4=" -X POST -H "X-FromAppId: csit-appc" -H "X-TransactionId: csit-appc" -H "Accept: application/json" -H "Content-Type: application/json" http://localhost:8282/restconf/operations/SLI-API:healthcheck )
if [ "$response" == "200" ]; then
echo "APPC health check passed."
diff --git a/test/csit/plans/appc/healthcheck/setup.sh b/test/csit/plans/appc/healthcheck/setup.sh
index 4067433..f476853 100755
--- a/test/csit/plans/appc/healthcheck/setup.sh
+++ b/test/csit/plans/appc/healthcheck/setup.sh
@@ -19,19 +19,11 @@
# Place the scripts in run order:
SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source ${WORKSPACE}/test/csit/scripts/appc/script1.sh
-amsterdam="$(echo ${WORKSPACE} | grep amsterdam | wc -l)"
-if [ "$amsterdam" != "1" ]; then
- export APPC_DOCKER_IMAGE_VERSION=1.3.0-SNAPSHOT-latest
- export CCSDK_DOCKER_IMAGE_VERSION=0.1-STAGING-latest
- export BRANCH=master
- export SOLUTION_NAME=onap
-else
- export APPC_DOCKER_IMAGE_VERSION=v1.2.0
- export CCSDK_DOCKER_IMAGE_VERSION=v0.1.0
- export BRANCH=amsterdam
- export SOLUTION_NAME=openecomp
-fi
+export APPC_DOCKER_IMAGE_VERSION=1.3.0-SNAPSHOT-latest
+export CCSDK_DOCKER_IMAGE_VERSION=0.2.1-SNAPSHOT
+export BRANCH=master
+export SOLUTION_NAME=onap
export NEXUS_USERNAME=docker
export NEXUS_PASSWD=docker
@@ -67,8 +59,8 @@
while [ "$TIME" -lt "$TIME_OUT" ]; do
startODL_status=$(docker exec appc_controller_container ps -e | grep startODL | wc -l)
-waiting_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Waiting | wc -l)
-run_level=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf system:start-level)
+waiting_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client bundle:list | grep Waiting | wc -l)
+run_level=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client system:start-level)
if [ "$run_level" == "Level 100" ] && [ "$startODL_status" -lt "1" ] && [ "$waiting_bundles" -lt "1" ] ; then
echo APPC started in $TIME seconds
diff --git a/test/csit/plans/dmaap-buscontroller/with_mr/setup.sh b/test/csit/plans/dmaap-buscontroller/with_mr/setup.sh
new file mode 100755
index 0000000..3e6efd4
--- /dev/null
+++ b/test/csit/plans/dmaap-buscontroller/with_mr/setup.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# org.onap.dmaap
+# ================================================================================
+# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+#
+
+
+# Place the scripts in run order:
+source ${WORKSPACE}/test/csit/scripts/dmaap-message-router/dmaap-mr-launch.sh
+dmaap_mr_launch
+MRC_IP=${IP}
+
+source ${WORKSPACE}/test/csit/scripts/dmaap-buscontroller/start-mock.sh
+start_mock "aaf"
+AAF_IP=${IP}
+start_mock "drps"
+DRPS_IP=${IP}
+
+source ${WORKSPACE}/test/csit/scripts/dmaap-buscontroller/dmaapbc-launch.sh
+dmaapbc_launch $AAF_IP $MRC_IP $DRPS_IP
+DMAAPBC_IP=${IP}
+
+
+echo "AAF_IP=$AAF_IP MRC_IP=$MRC_IP DRPS_IP=$DRPS_IP DMAAPBC_IP=$DMAAPBC_IP"
+
+# Pass any variables required by Robot test suites in ROBOT_VARIABLES
+ROBOT_VARIABLES="-v AAF_IP:${AAF_IP} -v MRC_IP:${MRC_IP} -v DRPS_IP:${DRPS_IP} -v DMAAPBC_IP:${DMAAPBC_IP}"
+
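Note: the ROBOT_VARIABLES string assembled above is what the common CSIT driver hands to Robot. A minimal sketch of how it is typically consumed, assuming the standard run-csit.sh flow (the pybot invocation below is illustrative and not part of this change):

    source ${WORKSPACE}/test/csit/plans/dmaap-buscontroller/with_mr/setup.sh
    # run-csit.sh (assumed) expands these variables for the suites listed in testplan.txt
    pybot -N dmaap-buscontroller ${ROBOT_VARIABLES} ${WORKSPACE}/test/csit/tests/dmaap-buscontroller/suite1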
diff --git a/test/csit/plans/dmaap-buscontroller/with_mr/teardown.sh b/test/csit/plans/dmaap-buscontroller/with_mr/teardown.sh
new file mode 100644
index 0000000..0474dde
--- /dev/null
+++ b/test/csit/plans/dmaap-buscontroller/with_mr/teardown.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# org.onap.dmaap
+# ================================================================================
+# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+#
+source ${WORKSPACE}/test/csit/scripts/dmaap-message-router/dmaap-mr-teardown.sh
+
+dmaap_mr_teardown
+kill-instance.sh aaf-mock
+kill-instance.sh drps-mock
+kill-instance.sh dmaapbc
+
diff --git a/test/csit/plans/dmaap-buscontroller/with_mr/testplan.txt b/test/csit/plans/dmaap-buscontroller/with_mr/testplan.txt
new file mode 100644
index 0000000..39a2f44
--- /dev/null
+++ b/test/csit/plans/dmaap-buscontroller/with_mr/testplan.txt
@@ -0,0 +1,4 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+dmaap-buscontroller/suite1
+
diff --git a/test/csit/plans/externalapi-nbi/healthcheck/setup.sh b/test/csit/plans/externalapi-nbi/healthcheck/setup.sh
new file mode 100644
index 0000000..50da7ad
--- /dev/null
+++ b/test/csit/plans/externalapi-nbi/healthcheck/setup.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# ========================================================================
+# Copyright (c) 2018 Orange
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================================================================
+
+source ${WORKSPACE}/test/csit/scripts/externalapi-nbi/start_nbi_containers.sh
diff --git a/test/csit/plans/externalapi-nbi/healthcheck/teardown.sh b/test/csit/plans/externalapi-nbi/healthcheck/teardown.sh
new file mode 100644
index 0000000..8392972
--- /dev/null
+++ b/test/csit/plans/externalapi-nbi/healthcheck/teardown.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# ========================================================================
+# Copyright (c) 2018 Orange
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================================================================
+
+source ${WORKSPACE}/test/csit/scripts/externalapi-nbi/delete_nbi_containers.sh
\ No newline at end of file
diff --git a/test/csit/plans/externalapi-nbi/healthcheck/testplan.txt b/test/csit/plans/externalapi-nbi/healthcheck/testplan.txt
new file mode 100644
index 0000000..06622ae
--- /dev/null
+++ b/test/csit/plans/externalapi-nbi/healthcheck/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+externalapi-nbi/healthcheck
diff --git a/test/csit/plans/vnfsdk-refrepo/sanity-check/setup.sh b/test/csit/plans/vnfsdk-refrepo/sanity-check/setup.sh
index 1c3e4e8..d1930ce 100644
--- a/test/csit/plans/vnfsdk-refrepo/sanity-check/setup.sh
+++ b/test/csit/plans/vnfsdk-refrepo/sanity-check/setup.sh
@@ -18,12 +18,12 @@
#Start postgres database
-docker run -d -i -t --name=postgres -p 5432:5432 nexus3.onap.org:10001/onap/refrepo/postgres:latest
+docker run -d -i -t --name=postgres -p 5432:5432 nexus3.onap.org:10001/onap/vnfsdk/refrepo/postgres:latest
POSTGRES=`docker inspect --format '{{ .NetworkSettings.IPAddress }}' postgres`
#Start market place
-docker run -d -i -t --name=refrepo -e POSTGRES_IP=$POSTGRES -p 8702:8702 nexus3.onap.org:10001/onap/refrepo:1.1-STAGING-latest
+docker run -d -i -t --name=refrepo -e POSTGRES_IP=$POSTGRES -p 8702:8702 nexus3.onap.org:10001/onap/vnfsdk/refrepo:1.1-STAGING-latest
# Wait for Market place initialization
echo Wait for VNF Repository initialization
diff --git a/test/csit/scripts/dmaap-buscontroller/dmaapbc-launch.sh b/test/csit/scripts/dmaap-buscontroller/dmaapbc-launch.sh
index 72c4438..5c34953 100755
--- a/test/csit/scripts/dmaap-buscontroller/dmaapbc-launch.sh
+++ b/test/csit/scripts/dmaap-buscontroller/dmaapbc-launch.sh
@@ -4,7 +4,7 @@
# sets global var IP with assigned IP address
function dmaapbc_launch() {
- TAG=onap/dmaap/buscontroller
+ TAG=onap/dmaap/buscontroller:latest
CONTAINER_NAME=dmaapbc
IP=""
diff --git a/test/csit/scripts/dmaap-message-router/dmaap-mr-launch.sh b/test/csit/scripts/dmaap-message-router/dmaap-mr-launch.sh
new file mode 100755
index 0000000..95fecff
--- /dev/null
+++ b/test/csit/scripts/dmaap-message-router/dmaap-mr-launch.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP DMAAP MR
+# ================================================================================
+# Copyright (C) 2018 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+# This script is a copy of plans/dmaap/mrpubsub/setup.sh, placed in the scripts
+# dir and edited into a callable function so that other plans (e.g. dmaap-buscontroller) can reuse it.
+#
+source ${SCRIPTS}/common_functions.sh
+
+# function to launch DMaaP MR docker containers.
+# sets global var IP with assigned IP address of MR container.
+# (kafka and zk containers are not called externally)
+
+function dmaap_mr_launch() {
+ # Clone DMaaP Message Router repo
+ mkdir -p $WORKSPACE/archives/dmaapmr
+ cd $WORKSPACE/archives/dmaapmr
+ #unset http_proxy https_proxy
+ git clone --depth 1 http://gerrit.onap.org/r/dmaap/messagerouter/messageservice -b master
+ git pull
+ cd $WORKSPACE/archives/dmaapmr/messageservice/src/main/resources/docker-compose
+ cp $WORKSPACE/archives/dmaapmr/messageservice/bundleconfig-local/etc/appprops/MsgRtrApi.properties /var/tmp/
+
+
+ # start DMaaP MR containers with docker compose and configuration from docker-compose.yml
+ docker login -u docker -p docker nexus3.onap.org:10001
+ docker-compose up -d
+
+	# Wait for initialization of the Docker containers for DMaaP MR, Kafka and Zookeeper
+ for i in {1..50}; do
+ if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] && \
+ [ $(docker inspect --format '{{ .State.Running }}' dockercompose_zookeeper_1) ] && \
+		[ $(docker inspect --format '{{ .State.Running }}' dockercompose_kafka_1) ]
+ then
+ echo "DMaaP Service Running"
+ break
+ else
+ echo sleep $i
+ sleep $i
+ fi
+ done
+
+
+ DMAAP_MR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_dmaap_1)
+ IP=${DMAAP_MR_IP}
+ KAFKA_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_kafka_1)
+ ZOOKEEPER_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_zookeeper_1)
+
+ echo DMAAP_MR_IP=${DMAAP_MR_IP}
+ echo IP=${IP}
+ echo KAFKA_IP=${KAFKA_IP}
+ echo ZOOKEEPER_IP=${ZOOKEEPER_IP}
+
+	# Initial docker-compose up and down is for populating kafka and zookeeper IPs in /var/tmp/MsgRtrApi.properties
+ docker-compose down
+
+	# Update kafka and zookeeper properties in MsgRtrApi.properties, which will be copied to the DMaaP container
+ sed -i -e 's/<zookeeper_host>/'$ZOOKEEPER_IP'/' /var/tmp/MsgRtrApi.properties
+ sed -i -e 's/<kafka_host>:<kafka_port>/'$KAFKA_IP':9092/' /var/tmp/MsgRtrApi.properties
+
+ docker-compose build
+ docker login -u docker -p docker nexus3.onap.org:10001
+ docker-compose up -d
+
+ # Wait for initialization of Docker containers
+ for i in {1..50}; do
+ if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] && \
+ [ $(docker inspect --format '{{ .State.Running }}' dockercompose_zookeeper_1) ] && \
+		[ $(docker inspect --format '{{ .State.Running }}' dockercompose_kafka_1) ]
+ then
+ echo "DMaaP Service Running"
+ break
+ else
+ echo sleep $i
+ sleep $i
+ fi
+ done
+
+ # Wait for initialization of docker services
+ for i in {1..50}; do
+ curl -sS -m 1 ${DMAAP_MR_IP}:3904/events/TestTopic && break
+ echo sleep $i
+ sleep $i
+ done
+}
+
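The header comment above documents the contract of dmaap_mr_launch: it exports the MR address through the global IP variable. A minimal caller sketch under that assumption, mirroring with_mr/setup.sh (MR_IP is an illustrative name):

    source ${WORKSPACE}/test/csit/scripts/dmaap-message-router/dmaap-mr-launch.sh
    dmaap_mr_launch
    MR_IP=${IP}                                        # IP is set by dmaap_mr_launch
    curl -sS "http://${MR_IP}:3904/events/TestTopic"   # same readiness probe the function uses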
diff --git a/test/csit/scripts/dmaap-message-router/dmaap-mr-teardown.sh b/test/csit/scripts/dmaap-message-router/dmaap-mr-teardown.sh
new file mode 100755
index 0000000..f573857
--- /dev/null
+++ b/test/csit/scripts/dmaap-message-router/dmaap-mr-teardown.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2018 AT&T Intellectual Property
+#
+
+function dmaap_mr_teardown() {
+kill-instance.sh dockercompose_dmaap_1
+kill-instance.sh dockercompose_kafka_1
+kill-instance.sh dockercompose_zookeeper_1
+}
diff --git a/test/csit/scripts/externalapi-nbi/delete_nbi_containers.sh b/test/csit/scripts/externalapi-nbi/delete_nbi_containers.sh
new file mode 100644
index 0000000..76be328
--- /dev/null
+++ b/test/csit/scripts/externalapi-nbi/delete_nbi_containers.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# ========================================================================
+# Copyright (c) 2018 Orange
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================================================================
+
+echo "This is ${WORKSPACE}/test/csit/scripts/externalapi-nbi/delete_nbi_containers.sh"
+
+# Check if docker-compose file exists
+if [ ! -f "$WORKSPACE/externalapi-nbi/docker-compose.yml" ]; then
+ echo 'There is nothing to clean. Exiting...' >&2
+ exit 0
+fi
+
+cd $WORKSPACE/externalapi-nbi
+
+# Remove containers and attached/anonymous volume(s)
+docker-compose down -v
+# Force stop & remove all containers and volumes
+docker-compose rm -f -s -v
+
+# clean up
+rm -rf $WORKSPACE/externalapi-nbi
\ No newline at end of file
diff --git a/test/csit/scripts/externalapi-nbi/start_nbi_containers.sh b/test/csit/scripts/externalapi-nbi/start_nbi_containers.sh
new file mode 100644
index 0000000..d1a026a
--- /dev/null
+++ b/test/csit/scripts/externalapi-nbi/start_nbi_containers.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# ========================================================================
+# Copyright (c) 2018 Orange
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================================================================
+
+NEXUS_USERNAME=docker
+NEXUS_PASSWD=docker
+NEXUS_DOCKER_REPO=nexus3.onap.org:10001
+DOCKER_IMAGE_VERSION=latest
+
+echo "This is ${WORKSPACE}/test/csit/scripts/externalapi-nbi/start_nbi_containers.sh"
+
+# Create directory
+mkdir -p $WORKSPACE/externalapi-nbi
+cd $WORKSPACE/externalapi-nbi
+
+# Fetch the latest docker-compose.yml
+wget -O docker-compose.yml 'https://git.onap.org/externalapi/nbi/plain/docker-compose.yml?h=master'
+
+# Pull the nbi docker image from nexus
+# MariaDB and mongoDB will be pulled automatically from docker.io during docker-compose
+docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
+docker pull $NEXUS_DOCKER_REPO/onap/externalapi/nbi:$DOCKER_IMAGE_VERSION
+
+# Start nbi, MariaDB and MongoDB containers with docker compose and nbi/docker-compose.yml
+docker-compose up -d mariadb mongo && sleep 5 # to ensure that these services are ready for connections
+docker-compose up -d nbi
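The fixed "sleep 5" above assumes MariaDB and MongoDB come up within five seconds. A hedged alternative is to poll readiness instead (service names are taken from the fetched docker-compose.yml; the mysqladmin probe is an assumption, not part of this change):

    # poll until mariadb accepts connections, then start nbi
    for i in {1..30}; do
        docker-compose exec -T mariadb mysqladmin ping --silent && break
        sleep 2
    done
    docker-compose up -d nbi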
diff --git a/test/csit/tests/externalapi-nbi/healthcheck/__init__.robot b/test/csit/tests/externalapi-nbi/healthcheck/__init__.robot
new file mode 100644
index 0000000..8263507
--- /dev/null
+++ b/test/csit/tests/externalapi-nbi/healthcheck/__init__.robot
@@ -0,0 +1,2 @@
+*** Settings ***
+Documentation ExternalAPI-NBI - healthcheck
diff --git a/test/csit/tests/externalapi-nbi/healthcheck/healthcheck.robot b/test/csit/tests/externalapi-nbi/healthcheck/healthcheck.robot
new file mode 100644
index 0000000..eb5a5bb
--- /dev/null
+++ b/test/csit/tests/externalapi-nbi/healthcheck/healthcheck.robot
@@ -0,0 +1,29 @@
+*** Settings ***
+Documentation The main interface for interacting with External API/NBI
+Library    RequestsLibrary
+Library    Collections
+
+*** Variables ***
+${GLOBAL_NBI_SERVER_PROTOCOL} http
+${GLOBAL_INJECTED_NBI_IP_ADDR} localhost
+${GLOBAL_NBI_SERVER_PORT} 8080
+${NBI_HEALTH_CHECK_PATH} /nbi/api/v1/status
+${NBI_ENDPOINT} ${GLOBAL_NBI_SERVER_PROTOCOL}://${GLOBAL_INJECTED_NBI_IP_ADDR}:${GLOBAL_NBI_SERVER_PORT}
+
+
+*** Keywords ***
+Run NBI Health Check
+ [Documentation] Runs NBI Health check
+ ${resp}= Run NBI Get Request ${NBI_HEALTH_CHECK_PATH}
+ Should Be Equal As Integers ${resp.status_code} 200
+
+Run NBI Get Request
+ [Documentation] Runs NBI Get request
+ [Arguments] ${data_path}
+ ${session}= Create Session session ${NBI_ENDPOINT}
+ ${resp}= Get Request session ${data_path}
+ Should Be Equal As Integers ${resp.status_code} 200
+ Log Received response from NBI ${resp.text}
+ ${json}= Set Variable ${resp.json()}
+ ${status}= Get From Dictionary ${json} status
+ Should Be Equal ${status} ok
+ [Return] ${resp}
\ No newline at end of file
diff --git a/test/s3p/collector/get_resource_stats.py b/test/s3p/collector/get_resource_stats.py
new file mode 100755
index 0000000..8ad22c5
--- /dev/null
+++ b/test/s3p/collector/get_resource_stats.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+import subprocess
+import sys
+import json
+import datetime
+import collections
+import re
+import tzlocal
+from decimal import Decimal
+
+sys.path.append('../util')
+import docker_util
+
+AAI1_NAME = "AAI1"
+AAI2_NAME = "AAI2"
+SO_NAME = "SO"
+SDNC_NAME = "SDNC"
+AAI1_IP = "10.0.1.1"
+AAI2_IP = "10.0.1.2"
+SO_IP = "10.0.5.1"
+SDNC_IP = "10.0.7.1"
+
+def aai1():
+ containers = docker_util.get_container_list(AAI1_IP)
+ run(AAI1_NAME, AAI1_IP, containers)
+
+def aai2():
+ containers = docker_util.get_container_list(AAI2_IP)
+ run(AAI2_NAME, AAI2_IP, containers)
+
+def so():
+ containers = docker_util.get_container_list(SO_IP)
+ run(SO_NAME, SO_IP, containers)
+
+def sdnc():
+ containers = docker_util.get_container_list(SDNC_IP)
+ run(SDNC_NAME, SDNC_IP, containers)
+
+def run(component, ip, containers):
+ cmd = ["ssh", "-i", "onap_dev"]
+ cmd.append("ubuntu@" + ip)
+ cmd.append("sudo docker stats --no-stream")
+ for c in containers:
+ cmd.append(c)
+ ssh = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ result = ssh.stdout.readlines()
+ if result == []:
+ error = ssh.stderr.readlines()
+ print(error)
+ else:
+ result.pop(0)
+ for line in result:
+ token = line.decode('ascii').strip().split()
+ data = collections.OrderedDict()
+ data['datetime'] = datetime.datetime.now(tzlocal.get_localzone()).strftime("%Y-%m-%dT%H:%M:%S%Z")
+ data['component'] = component
+ data['container'] = token[0]
+ data['cpu'] = get_percent_number(token[1])
+ data['memory'] = get_memory_number(token[2])
+ data['physical'] = get_memory_number(token[4])
+ data['mem_percent'] = get_percent_number(token[5])
+ size = docker_util.get_container_volume_size(ip, data['container'])
+ if size is not None:
+ data['volume'] = size
+                output_file.write(json.dumps(data, default = myconverter) + "\n")
+                output_file.flush()
+
+def myconverter(o):
+ if isinstance(o, datetime.datetime):
+ return o.__str__()
+
+def get_percent_number(s):
+ return float(re.sub('[^0-9\.]', '', s))
+
+def get_memory_number(s):
+ f = float(re.sub('[^0-9\.]', '', s))
+ if s.endswith("GiB"):
+ f = f*1000
+ return f
+
+output_file = open("resource.log", "w+")
+while True:
+ so()
+ sdnc()
+ aai1()
+ aai2()
diff --git a/test/s3p/generator/locustfile.py b/test/s3p/generator/locustfile.py
new file mode 100644
index 0000000..63031cd
--- /dev/null
+++ b/test/s3p/generator/locustfile.py
@@ -0,0 +1,186 @@
+import random
+import string
+import time
+import datetime
+import sys
+import collections
+import json
+import tzlocal
+import os
+import fcntl
+import logging
+from locust import HttpLocust, TaskSet, task
+from decimal import Decimal
+
+
+class UserBehavior(TaskSet):
+ base = "/ecomp/mso/infra/e2eServiceInstances/v3"
+ headers = {"Accept":"application/json","Content-Type":"application/json","Authorization":"Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA=="}
+ service_creation_body = "{\"service\": {\"name\": \"E2E_volte_%s\", \"description\": \"E2E_volte_ONAP_deploy\", \"serviceDefId\": \"a16eb184-4a81-4c8c-89df-c287d390315a\", \"templateId\": \"012c3446-51db-4a2a-9e64-a936f10a5e3c\", \"parameters\": { \"globalSubscriberId\": \"Demonstration\", \"subscriberName\": \"Demonstration\", \"serviceType\": \"vIMS\", \"templateName\": \"VoLTE e2e Service:null\", \"resources\": [ { \"resourceName\": \"VL OVERLAYTUNNEL\", \"resourceDefId\": \"671d4757-b018-47ab-9df3-351c3bda0a98\", \"resourceId\": \"e859b0fd-d928-4cc8-969e-0fee7795d623\", \"nsParameters\": { \"locationConstraints\": [], \"additionalParamForNs\": { \"site2_vni\": \"5010\", \"site1_localNetworkAll\": \"false\", \"site1_vni\": \"5010\", \"site1_exportRT1\": \"11:1\", \"description\": \"overlay\", \"site2_localNetworkAll\": \"false\", \"site1_routerId\": \"9.9.9.9\", \"site1_fireWallEnable\": \"false\", \"site1_networkName\": \"network1\", \"site2_description\": \"overlay\", \"site1_importRT1\": \"11:1\", \"site1_description\": \"overlay\", \"site2_networkName\": \"network3\", \"name\": \"overlay\", \"site2_fireWallEnable\": \"false\", \"site2_id\": \"ZTE-DCI-Controller\", \"site2_routerId\": \"9.9.9.9\", \"site2_importRT1\": \"11:1\", \"site2_exportRT1\": \"11:1\", \"site2_fireWallId\": \"false\", \"site1_id\": \"DCI-Controller-1\", \"tunnelType\": \"L3-DCI\" } } },{\"resourceName\": \"VL UNDERLAYVPN\", \"resourceDefId\": \"4f5d692b-4022-43ab-b878-a93deb5b2061\", \"resourceId\": \"b977ec47-45b2-41f6-aa03-bf6554dc9620\", \"nsParameters\": { \"locationConstraints\": [], \"additionalParamForNs\": { \"topology\": \"full-mesh\", \"site2_name\": \"site2\", \"sna2_name\": \"site2_sna\", \"description\": \"underlay\", \"sna1_name\": \"site1_sna\", \"ac1_route\": \"3.3.3.12/30:dc84ce88-99f7\", \"ac2_peer_ip\": \"3.3.3.20/30\", \"technology\": \"mpls\", \"ac2_route\": \"3.3.3.20/30:98928302-3287\", \"ac2_id\": \"84d937a4-b227-375f-a744-2b778f36e04e\", \"ac1_protocol\": \"STATIC\", \"ac2_svlan\": \"4004\", \"serviceType\": \"l3vpn-ipwan\", \"ac2_ip\": \"3.3.3.21/30\", \"pe2_id\": \"4412d3f0-c296-314d-9284-b72fc5d485e8\", \"ac1_id\": \"b4f01ac0-c1e1-3e58-a8be-325e4372c960\", \"af_type\": \"ipv4\", \"ac1_svlan\": \"4002\", \"ac1_peer_ip\": \"3.3.3.12/30\", \"ac1_ip\": \"3.3.3.13/30\", \"version\": \"1.0\", \"name\": \"testunderlay\", \"id\": \"123124141\", \"pe1_id\": \"2ef788f0-407c-3070-b756-3a5cd71fde18\", \"ac2_protocol\": \"STATIC\", \"site1_name\": \"stie1\" } } } ] } } }"
+    # class-level file handles so a single set of log files is shared across all simulated users
+    transaction_file = open("transaction.log", "w+")
+ operation_file = open("operation.log", "w+")
+
+ def on_start(self):
+        """ on_start is called when a Locust user starts, before any task is scheduled """
+ self.init()
+
+ def init(self):
+ pass
+
+ def myconverter(self, o):
+ if isinstance(o, datetime.datetime):
+ return o.__str__()
+
+ @task(1)
+ def create_service(self):
+ # Post a E2E service instantiation request to SO
+ method = "POST"
+ url = self.base
+ service_instance_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
+ data = self.service_creation_body % service_instance_name
+
+ t1 = datetime.datetime.now(tzlocal.get_localzone())
+ response = self.client.request(method, url, headers=self.headers, data=data)
+ t2 = datetime.datetime.now(tzlocal.get_localzone())
+ delta = t2 - t1
+ data = collections.OrderedDict()
+ data['datetime'] = datetime.datetime.now(tzlocal.get_localzone()).strftime("%Y-%m-%dT%H:%M:%S%Z")
+ data['method'] = method
+ data['url'] = url
+ data['status_code'] = response.status_code
+        data['transaction_time'] = (delta.seconds*10**6 + delta.microseconds)/1000
+ fcntl.flock(self.transaction_file, fcntl.LOCK_EX)
+ self.transaction_file.write(json.dumps(data, default = self.myconverter) + "\n")
+ self.transaction_file.flush()
+ os.fsync(self.transaction_file)
+ fcntl.flock(self.transaction_file, fcntl.LOCK_UN)
+ serviceId = response.json()['service']['serviceId']
+ operationId = response.json()['service']['operationId']
+
+ # Get the request status
+ method = "GET"
+ url = self.base + "/" + serviceId + "/operations/" + operationId
+ url1 = "/ecomp/mso/infra/e2eServiceInstances/v3/{serviceId}/operations/{operationId}"
+ count = 1
+ while count < 50:
+ tt1 = datetime.datetime.now()
+ response = self.client.request(method, url, name=url1, headers=self.headers)
+ tt2 = datetime.datetime.now()
+ delta = tt2 - tt1
+ result = response.json()['operationStatus']['result']
+ progress = response.json()['operationStatus']['progress']
+ data = collections.OrderedDict()
+ data['datetime'] = datetime.datetime.now(tzlocal.get_localzone()).strftime("%Y-%m-%dT%H:%M:%S%Z")
+ data['method'] = method
+ data['url'] = url1
+ data['status_code'] = response.status_code
+ data['count'] = count
+ data['result'] = result
+ data['progress'] = progress
+            data['transaction_time'] = (delta.seconds*10**6 + delta.microseconds)/1000
+ fcntl.flock(self.transaction_file, fcntl.LOCK_EX)
+ self.transaction_file.write(json.dumps(data, default = self.myconverter) + "\n")
+ self.transaction_file.flush()
+ os.fsync(self.transaction_file)
+ fcntl.flock(self.transaction_file, fcntl.LOCK_UN)
+ if result == "finished" or result == "error":
+ break
+ else:
+ time.sleep(1)
+ count = count + 1
+
+ if result == "finished":
+ result = "success"
+ else:
+ result = "failure"
+ t3 = datetime.datetime.now(tzlocal.get_localzone())
+ delta = t3 - t1
+ data = collections.OrderedDict()
+ data['datetime'] = t1.strftime("%Y-%m-%dT%H:%M:%S%Z")
+ data['operation'] = "volte_create"
+ data['result'] = result
+ data['duration'] = round(delta.seconds + Decimal(delta.microseconds/1000000.0), 3)
+ fcntl.flock(self.operation_file, fcntl.LOCK_EX)
+ self.operation_file.write(json.dumps(data, default = self.myconverter) + "\n")
+ self.operation_file.flush()
+ os.fsync(self.operation_file)
+ fcntl.flock(self.operation_file, fcntl.LOCK_UN)
+
+ self.delete_service(serviceId)
+
+ def delete_service(self, serviceId):
+ method = "DELETE"
+ url = self.base + "/" + serviceId
+ data = "{\"globalSubscriberId\":\"Demonstration\", \"serviceType\":\"vIMS\"}"
+ t1 = datetime.datetime.now(tzlocal.get_localzone())
+ response = self.client.request(method, url, name=self.base, headers=self.headers, data=data)
+ t2 = datetime.datetime.now(tzlocal.get_localzone())
+ delta = t2 - t1
+ data = collections.OrderedDict()
+ data['datetime'] = datetime.datetime.now(tzlocal.get_localzone()).strftime("%Y-%m-%dT%H:%M:%S%Z")
+ data['method'] = method
+ data['url'] = self.base
+ data['status_code'] = response.status_code
+        data['transaction_time'] = (delta.seconds*10**6 + delta.microseconds)/1000
+ fcntl.flock(self.transaction_file, fcntl.LOCK_EX)
+ self.transaction_file.write(json.dumps(data, default = self.myconverter) + "\n")
+ self.transaction_file.flush()
+ os.fsync(self.transaction_file)
+ fcntl.flock(self.transaction_file, fcntl.LOCK_UN)
+ operationId = response.json()['operationId']
+
+ # Get the request status
+ method = "GET"
+ url = self.base + "/" + serviceId + "/operations/" + operationId
+ url1 = "/ecomp/mso/infra/e2eServiceInstances/v3/{serviceId}/operations/{operationId}"
+ count = 1
+ while count < 50:
+ tt1 = datetime.datetime.now(tzlocal.get_localzone())
+ response = self.client.request(method, url, name=url1, headers=self.headers)
+ tt2 = datetime.datetime.now(tzlocal.get_localzone())
+ delta = tt2 - tt1
+ result = response.json()['operationStatus']['result']
+ progress = response.json()['operationStatus']['progress']
+ data = collections.OrderedDict()
+ data['datetime'] = datetime.datetime.now(tzlocal.get_localzone()).strftime("%Y-%m-%dT%H:%M:%S%Z")
+ data['method'] = method
+ data['url'] = url1
+ data['status_code'] = response.status_code
+ data['count'] = count
+ data['result'] = result
+ data['progress'] = progress
+            data['transaction_time'] = (delta.seconds*10**6 + delta.microseconds)/1000
+ fcntl.flock(self.transaction_file, fcntl.LOCK_EX)
+ self.transaction_file.write(json.dumps(data, default = self.myconverter) + "\n")
+ self.transaction_file.flush()
+ os.fsync(self.transaction_file)
+ fcntl.flock(self.transaction_file, fcntl.LOCK_UN)
+ if result == "finished" or result == "error":
+ break
+ else:
+ time.sleep(1)
+ count = count + 1
+
+ if result == "finished":
+ result = "success"
+ else:
+ result = "failure"
+ t3 = datetime.datetime.now(tzlocal.get_localzone())
+ delta = t3 - t1
+ data = collections.OrderedDict()
+ data['datetime'] = t1.strftime("%Y-%m-%dT%H:%M:%S%Z")
+ data['operation'] = "volte_delete"
+ data['result'] = result
+ data['duration'] = round(delta.seconds + Decimal(delta.microseconds/1000000.0), 3)
+ fcntl.flock(self.operation_file, fcntl.LOCK_EX)
+ self.operation_file.write(json.dumps(data, default = self.myconverter) + "\n")
+ self.operation_file.flush()
+ os.fsync(self.operation_file)
+ fcntl.flock(self.operation_file, fcntl.LOCK_UN)
+
+
+class WebsiteUser(HttpLocust):
+ task_set = UserBehavior
+ min_wait = 1000
+ max_wait = 3000
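The task set above posts a VoLTE E2E service creation to SO, polls the operation status until it finishes or errors, then deletes the service. A hedged example of driving it with the pre-1.0 locust CLI (host, client count and hatch rate are illustrative assumptions):

    # run 5 simulated users against SO without the web UI (old-style locust flags)
    locust -f locustfile.py --host=http://<so-host>:8080 --no-web -c 5 -r 1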
diff --git a/test/s3p/mock/set_expectation.sh b/test/s3p/mock/set_expectation.sh
new file mode 100755
index 0000000..e8c2c24
--- /dev/null
+++ b/test/s3p/mock/set_expectation.sh
@@ -0,0 +1,249 @@
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "GET",
+ "path": "/api/huaweivnfmdriver/v1/swagger.json"
+ },
+ "httpResponse": {
+ "statusCode": 200,
+ "headers": {
+ "content-type": ["application/json"]
+ },
+ "body": {
+ "not": false,
+ "type": "JSON",
+ "json": "{\"errcode\":\"0\",\"errmsg\":\"get token successfully.\",\"data\":{\"expiredDate\":\"2018-11-10 10:03:33\"}}"
+ }
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "POST",
+ "path": "/controller/v2/tokens"
+ },
+ "httpResponse": {
+ "statusCode": 200,
+ "headers": {
+ "content-type": ["application/json"]
+ },
+ "body": {
+ "not": false,
+ "type": "JSON",
+ "json": "{\"errcode\":\"0\",\"errmsg\":\"get token successfully.\",\"data\":{\"expiredDate\":\"2018-11-10 10:03:33\",\"token_id\":\"7F06BFDDAC33A989:77DAD6058B1BB81EF1A557745E4D9C78399B31C4DB509704ED8A7DF05A362A59\"}}"
+ }
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "POST",
+ "path": "/restconf/data/huawei-ac-net-l3vpn-svc:l3vpn-svc-cfg/vpn-services"
+ },
+ "httpResponse": {
+ "statusCode": 201
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "PUT",
+ "path": "/restconf/data/huawei-ac-net-l3vpn-svc:l3vpn-svc-cfg/huawei-ac-net-l3vpn-svc-vfi:vrf-attributes"
+ },
+ "httpResponse": {
+ "statusCode": 204
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "POST",
+ "path": "/restconf/data/huawei-ac-net-l3vpn-svc:l3vpn-svc-cfg/sites"
+ },
+ "httpResponse": {
+ "statusCode": 201
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+# ZTE DCI
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "POST",
+ "path": "/v2.0/l3-dci-connects"
+ },
+ "httpResponse": {
+ "statusCode": 201
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+# huaweivnfmdriver
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "POST",
+ "path": "/api/huaweivnfmdriver/v1/a0400010-11d7-4875-b4ae-5f42ed5d3a85/vnfs"
+ },
+ "httpResponse": {
+ "statusCode": 200,
+ "headers": {
+ "content-type": ["application/json"]
+ },
+ "body": {
+ "not": false,
+ "type": "JSON",
+ "json": "{\"vnfInstanceId\":\"fa3dca847b054f4eb9d3bc8bb9e5eec9\",\"jobId\":\"fa3dca847b054f4eb9d3bc8bb9e5eec9_post\"}"
+ }
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+# huaweivnfmdriver
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "GET",
+ "path": "/api/huaweivnfmdriver/v1/a0400010-11d7-4875-b4ae-5f42ed5d3a85/jobs/fa3dca847b054f4eb9d3bc8bb9e5eec9_post",
+ "queryStringParameters": {
+ "responseId": ["0"]
+ }
+ },
+ "httpResponse": {
+ "statusCode": 200,
+ "headers": {
+ "content-type": ["application/json"]
+ },
+ "body": {
+ "not": false,
+ "type": "JSON",
+ "json": "{\"jobId\":\"fa3dca847b054f4eb9d3bc8bb9e5eec9\",\"responsedescriptor\":{\"progress\":\"50\",\"status\":\"processing\",\"errorCode\":null,\"responseId\":\"0\"}}"
+ }
+ },
+ "times" : {
+ "unlimited" : false
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+# huaweivnfmdriver
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "GET",
+ "path": "/api/huaweivnfmdriver/v1/a0400010-11d7-4875-b4ae-5f42ed5d3a85/jobs/fa3dca847b054f4eb9d3bc8bb9e5eec9_post",
+ "queryStringParameters": {
+ "responseId": ["0"]
+ }
+ },
+ "httpResponse": {
+ "statusCode": 200,
+ "headers": {
+ "content-type": ["application/json"]
+ },
+ "body": {
+ "not": false,
+ "type": "JSON",
+ "json": "{\"jobId\":\"fa3dca847b054f4eb9d3bc8bb9e5eec9\",\"responsedescriptor\":{\"progress\":\"100\",\"status\":\"processing\",\"errorCode\":null,\"responseId\":\"0\"}}"
+ }
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "GET",
+ "path": "/api/huaweivnfmdriver/v1/a0400010-11d7-4875-b4ae-5f42ed5d3a85/jobs/fa3dca847b054f4eb9d3bc8bb9e5eec9_post",
+ "queryStringParameters": {
+ "responseId": ["50"]
+ }
+ },
+ "httpResponse": {
+ "statusCode": 200,
+ "headers": {
+ "content-type": ["application/json"]
+ },
+ "body": {
+ "not": false,
+ "type": "JSON",
+ "json": "{\"jobId\":\"fa3dca847b054f4eb9d3bc8bb9e5eec9\",\"responsedescriptor\":{\"progress\":\"100\",\"status\":\"processing\",\"errorCode\":null,\"responseId\":\"50\"}}"
+ }
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
+
+curl -v -X PUT "http://localhost:1080/expectation" -d '{
+ "httpRequest": {
+ "method": "GET",
+ "path": "/api/huaweivnfmdriver/v1/a0400010-11d7-4875-b4ae-5f42ed5d3a85/jobs/fa3dca847b054f4eb9d3bc8bb9e5eec9_post",
+ "queryStringParameters": {
+ "responseId": ["2"]
+ }
+ },
+ "httpResponse": {
+ "statusCode": 200,
+ "headers": {
+ "content-type": ["application/json"]
+ },
+ "body": {
+ "not": false,
+ "type": "JSON",
+ "json": "{\"jobId\":\"fa3dca847b054f4eb9d3bc8bb9e5eec9\",\"responsedescriptor\":{\"progress\":\"100\",\"status\":\"processing\",\"errorCode\":null,\"responseId\":\"2\"}}"
+ }
+ },
+ "times" : {
+ "unlimited" : true
+ },
+ "timeToLive" : {
+ "unlimited" : true
+ }
+}'
+
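Once the expectations above are registered, the MockServer instance on localhost:1080 answers matching requests with the canned bodies. A quick hedged spot-check (these verification commands are illustrative, not part of this change):

    curl -s http://localhost:1080/api/huaweivnfmdriver/v1/swagger.json
    curl -s -X POST http://localhost:1080/controller/v2/tokens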
diff --git a/test/vcpe/config_sdnc_so.py b/test/vcpe/config_sdnc_so.py
new file mode 100755
index 0000000..660c70e
--- /dev/null
+++ b/test/vcpe/config_sdnc_so.py
@@ -0,0 +1,89 @@
+#! /usr/bin/python
+
+import logging
+from vcpecommon import *
+import csar_parser
+
+
+def insert_customer_service_to_sdnc(vcpecommon):
+ """
+ INSERT INTO SERVICE_MODEL (`service_uuid`, `model_yaml`,`invariant_uuid`,`version`,`name`,`description`,`type`,`category`,`ecomp_naming`,`service_instance_name_prefix`,`filename`,`naming_policy`) values ('7e319b6f-e710-440e-bbd2-63c1004949ef', null, 'a99ace8a-6e3b-447d-b2ff-4614e4234eea',null,'vCPEService', 'vCPEService', 'Service','Network L1-3', 'N', 'vCPEService', 'vCpeResCust110701/service-Vcperescust110701-template.yml',null);
+ INSERT INTO ALLOTTED_RESOURCE_MODEL (`customization_uuid`,`model_yaml`,`invariant_uuid`,`uuid`,`version`,`naming_policy`,`ecomp_generated_naming`,`depending_service`,`role`,`type`,`service_dependency`,`allotted_resource_type`) VALUES ( '7e40b664-d7bf-47ad-8f7c-615631d53cd7', NULL, 'f51b0aae-e24a-4cff-b190-fe3daf3d15ee', 'f3137496-1605-40e9-b6df-64aa0f8e91a0', '1.0', NULL,'Y',NULL,NULL,'TunnelXConnect',NULL, 'TunnelXConnect');
+ INSERT INTO ALLOTTED_RESOURCE_MODEL (`customization_uuid`,`model_yaml`,`invariant_uuid`,`uuid`,`version`,`naming_policy`,`ecomp_generated_naming`,`depending_service`,`role`,`type`,`service_dependency`,`allotted_resource_type`) VALUES ( 'e46097e1-6a0c-4cf3-a7e5-c39ed407e65e', NULL, 'aa60f6ba-541b-48d6-a5ff-3b0e1f0ad9bf', '0e157d52-b945-422f-b3a8-ab685c2be079', '1.0', NULL,'Y',NULL,NULL,'BRG',NULL, 'TunnelXConnect');
+ INSERT INTO VF_MODEL (`customization_uuid`,`model_yaml`,`invariant_uuid`,`uuid`,`version`,`name`,`naming_policy`,`ecomp_generated_naming`,`avail_zone_max_count`,`nf_function`,`nf_code`,`nf_type`,`nf_role`,`vendor`,`vendor_version`) VALUES ( '3768afa5-cf9e-4071-bb25-3a2e2628dd87', NULL, '5f56893b-d026-4672-b785-7f5ffeb498c6', '7cf28b23-5719-485b-9ab4-dae1a2fa0e07', '1.0', 'vspvgw111601',NULL,'Y',1,NULL,NULL,NULL,NULL,'vCPE','1.0');
+ INSERT INTO VF_MODULE_MODEL (`customization_uuid`,`model_yaml`,`invariant_uuid`,`uuid`,`version`,`vf_module_type`,`availability_zone_count`,`ecomp_generated_vm_assignments`) VALUES ( '17a9c7d1-6f8e-4930-aa83-0d323585184f', NULL, 'd772ddd1-7623-40b4-a2a5-ec287916cb51', '6e1a0652-f5e9-4caa-bff8-39bf0c8589a3', '1.0', 'Base',NULL,NULL);
+
+ :return:
+ """
+ logger = logging.getLogger('__name__')
+ logger.info('Inserting customer service data to SDNC DB')
+ csar_file = vcpecommon.find_file('rescust', 'csar', 'csar')
+ parser = csar_parser.CsarParser()
+ parser.parse_csar(csar_file)
+ cmds = []
+ cmds.append("INSERT INTO SERVICE_MODEL (`service_uuid`, `model_yaml`,`invariant_uuid`,`version`,`name`," \
+ "`description`,`type`,`category`,`ecomp_naming`,`service_instance_name_prefix`,`filename`," \
+ "`naming_policy`) values ('{0}', null, '{1}',null,'{2}', 'vCPEService', 'Service','Network L1-3'," \
+ "'N', 'vCPEService', '{3}/{4}',null);".format(parser.svc_model['modelVersionId'],
+ parser.svc_model['modelInvariantId'],
+ parser.svc_model['modelName'],
+ parser.svc_model['modelName'],
+ parser.svc_model['modelName']))
+
+ for model in parser.vnf_models:
+ if 'tunnel' in model['modelCustomizationName'].lower() or 'brg' in model['modelCustomizationName'].lower():
+ cmds.append("INSERT INTO ALLOTTED_RESOURCE_MODEL (`customization_uuid`,`model_yaml`,`invariant_uuid`," \
+ "`uuid`,`version`,`naming_policy`,`ecomp_generated_naming`,`depending_service`,`role`,`type`," \
+ "`service_dependency`,`allotted_resource_type`) VALUES ('{0}',NULL,'{1}','{2}','1.0'," \
+ "NULL,'Y', NULL,NULL,'TunnelXConnect'," \
+ "NULL, 'TunnelXConnect');".format(model['modelCustomizationId'], model['modelInvariantId'],
+ model['modelVersionId']))
+ else:
+ cmds.append("INSERT INTO VF_MODEL (`customization_uuid`,`model_yaml`,`invariant_uuid`,`uuid`,`version`," \
+ "`name`,`naming_policy`,`ecomp_generated_naming`,`avail_zone_max_count`,`nf_function`," \
+ "`nf_code`,`nf_type`,`nf_role`,`vendor`,`vendor_version`) VALUES ('{0}',NULL,'{1}','{2}'," \
+ "'1.0', '{3}',NULL,'Y',1,NULL,NULL,NULL,NULL,'vCPE'," \
+ "'1.0');".format(model['modelCustomizationId'], model['modelInvariantId'],
+ model['modelVersionId'], model['modelCustomizationName'].split()[0]))
+
+ model = parser.vfmodule_models[0]
+ cmds.append("INSERT INTO VF_MODULE_MODEL (`customization_uuid`,`model_yaml`,`invariant_uuid`,`uuid`,`version`," \
+ "`vf_module_type`,`availability_zone_count`,`ecomp_generated_vm_assignments`) VALUES ('{0}', NULL," \
+ "'{1}', '{2}', '1.0', 'Base',NULL,NULL)" \
+ ";".format(model['modelCustomizationId'], model['modelInvariantId'], model['modelVersionId']))
+ print('\n'.join(cmds))
+ vcpecommon.insert_into_sdnc_db(cmds)
+
+
+def insert_customer_service_to_so(vcpecommon):
+ logger = logging.getLogger('__name__')
+ logger.info('Inserting neutron HEAT template to SO DB and creating a recipe for customer service')
+ csar_file = vcpecommon.find_file('rescust', 'csar', 'csar')
+ parser = csar_parser.CsarParser()
+ parser.parse_csar(csar_file)
+ cmds = []
+ cmds.append("INSERT INTO `service_recipe` (`ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, " \
+ "`SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, " \
+ "`SERVICE_MODEL_UUID`) VALUES ('createInstance','1','{0}'," \
+ "'/mso/async/services/CreateVcpeResCustService',NULL,181,NULL, NOW()," \
+ "'{1}');".format(parser.svc_model['modelName'], parser.svc_model['modelVersionId']))
+
+    cmds.append("delete from `heat_template_params` where "
+ "`HEAT_TEMPLATE_ARTIFACT_UUID`='efee1d84-b8ec-11e7-abc4-cec278b6b50a';")
+ cmds.append("delete from `heat_template` where ARTIFACT_UUID='efee1d84-b8ec-11e7-abc4-cec278b6b50a';")
+    network_template_file = vcpecommon.find_file('neutron', 'yaml', 'preload_templates')
+    with open(network_template_file, 'r') as fin:
+ lines = fin.readlines()
+ longtext = '\n'.join(lines)
+ cmds.append("INSERT INTO `heat_template`(`ARTIFACT_UUID`, `NAME`, `VERSION`, `BODY`, `TIMEOUT_MINUTES`, " \
+ "`DESCRIPTION`, `CREATION_TIMESTAMP`, `ARTIFACT_CHECKSUM`) VALUES(" \
+ "'efee1d84-b8ec-11e7-abc4-cec278b6b50a', 'Generic NeutronNet', '1', '{0}', 10, " \
+ "'Generic Neutron Template', NOW(), 'MANUAL RECORD');".format(longtext))
+
+ cmds.append("INSERT INTO `heat_template_params`(`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, " \
+ "`PARAM_TYPE`, `PARAM_ALIAS`) VALUES('efee1d84-b8ec-11e7-abc4-cec278b6b50a', 'shared', 0, " \
+ "'string', NULL);")
+
+ print('\n'.join(cmds))
+ vcpecommon.insert_into_so_db(cmds)
+
diff --git a/test/vcpe/csar_parser.py b/test/vcpe/csar_parser.py
new file mode 100755
index 0000000..f101364
--- /dev/null
+++ b/test/vcpe/csar_parser.py
@@ -0,0 +1,231 @@
+#! /usr/bin/python
+import os
+import zipfile
+import shutil
+import yaml
+import json
+import logging
+
+
+class CsarParser:
+ def __init__(self):
+ self.logger = logging.getLogger(__name__)
+ self.svc_model = {}
+ self.net_models = [] # there could be multiple networks
+        self.vnf_models = []  # this version only supports a single VNF in the service template
+        self.vfmodule_models = []  # this version only supports a single VF module in the service template
+
+ def get_service_yaml_from_csar(self, csar_file):
+ """
+ :param csar_file: csar file path name, e.g. 'csar/vgmux.csar'
+ :return:
+ """
+ tmpdir = './__tmp'
+ if os.path.isdir(tmpdir):
+ shutil.rmtree(tmpdir)
+ os.mkdir(tmpdir)
+
+ with zipfile.ZipFile(csar_file, "r") as zip_ref:
+ zip_ref.extractall(tmpdir)
+
+ yamldir = tmpdir + '/Definitions'
+ if os.path.isdir(yamldir):
+ for filename in os.listdir(yamldir):
+ # look for service template like this: service-Vcpesvcbrgemu111601-template.yml
+ if filename.startswith('service-') and filename.endswith('-template.yml'):
+ return os.path.join(yamldir, filename)
+
+ self.logger.error('Invalid file: ' + csar_file)
+ return ''
+
+ def get_service_model_info(self, svc_template):
+        """ extract service model info from the YAML and convert it to the form used in SO requests
+ Sample from yaml:
+ {
+ "UUID": "aed4fc5e-b871-4e26-8531-ceabd46df85e",
+ "category": "Network L1-3",
+ "description": "Infra service",
+ "ecompGeneratedNaming": true,
+ "invariantUUID": "c806682a-5b3a-44d8-9e88-0708be151296",
+ "name": "vcpesvcinfra111601",
+ "namingPolicy": "",
+ "serviceEcompNaming": true,
+ "serviceRole": "",
+ "serviceType": "",
+ "type": "Service"
+ },
+
+ Convert to
+ {
+ "modelType": "service",
+ "modelInvariantId": "ca4c7a70-06fd-45d8-8b9e-c9745d25bf2b",
+ "modelVersionId": "5d8911b4-e50c-4096-a81e-727a8157193c",
+ "modelName": "vcpesvcbrgemu111601",
+ "modelVersion": "1.0"
+ },
+
+ """
+ if svc_template['metadata']['type'] != 'Service':
+ self.logger.error('csar error: metadata->type is not Service')
+ return
+
+ metadata = svc_template['metadata']
+ self.svc_model = {
+ 'modelType': 'service',
+ 'modelInvariantId': metadata['invariantUUID'],
+ 'modelVersionId': metadata['UUID'],
+ 'modelName': metadata['name']
+ }
+ if 'version' in metadata:
+ self.svc_model['modelVersion'] = metadata['version']
+ else:
+ self.svc_model['modelVersion'] = '1.0'
+
+ def get_vnf_and_network_model_info(self, svc_template):
+        """ extract VNF and network model info from the YAML and convert it to the form used in SO requests
+ Sample from yaml:
+ "topology_template": {
+ "node_templates": {
+ "CPE_PUBLIC": {
+ "metadata": {
+ "UUID": "33b2c367-a165-4bb3-81c3-0150cd06ceff",
+ "category": "Generic",
+ "customizationUUID": "db1d4ac2-62cd-4e5d-b2dc-300dbd1a5da1",
+ "description": "Generic NeutronNet",
+ "invariantUUID": "3d4c0e47-4794-4e98-a794-baaced668930",
+ "name": "Generic NeutronNet",
+ "resourceVendor": "ATT (Tosca)",
+ "resourceVendorModelNumber": "",
+ "resourceVendorRelease": "1.0.0.wd03",
+ "subcategory": "Network Elements",
+ "type": "VL",
+ "version": "1.0"
+ },
+ "type": "org.openecomp.resource.vl.GenericNeutronNet"
+ },
+ Convert to
+ {
+ "modelType": "network",
+ "modelInvariantId": "3d4c0e47-4794-4e98-a794-baaced668930",
+ "modelVersionId": "33b2c367-a165-4bb3-81c3-0150cd06ceff",
+ "modelName": "Generic NeutronNet",
+ "modelVersion": "1.0",
+ "modelCustomizationId": "db1d4ac2-62cd-4e5d-b2dc-300dbd1a5da1",
+ "modelCustomizationName": "CPE_PUBLIC"
+ },
+ """
+ node_dic = svc_template['topology_template']['node_templates']
+ for node_name, v in node_dic.items():
+ model = {
+ 'modelInvariantId': v['metadata']['invariantUUID'],
+ 'modelVersionId': v['metadata']['UUID'],
+ 'modelName': v['metadata']['name'],
+ 'modelVersion': v['metadata']['version'],
+ 'modelCustomizationId': v['metadata']['customizationUUID'],
+ 'modelCustomizationName': node_name
+ }
+
+ if v['type'].startswith('org.openecomp.resource.vl.GenericNeutronNet'):
+ # a neutron network is found
+ self.logger.info('Parser found a network: ' + node_name)
+ model['modelType'] = 'network'
+ self.net_models.append(model)
+ elif v['type'].startswith('org.openecomp.resource.vf.'):
+ # a VNF is found
+ self.logger.info('Parser found a VNF: ' + node_name)
+ model['modelType'] = 'vnf'
+ self.vnf_models.append(model)
+ else:
+ self.logger.warning('Parser found a node that is neither a network nor a VNF: ' + node_name)
+
+ def get_vfmodule_model_info(self, svc_template):
+        """ extract VF module model info from the YAML and convert it to the form used in SO requests
+ Sample from yaml:
+ "topology_template": {
+ "groups": {
+ "vspinfra1116010..Vspinfra111601..base_vcpe_infra..module-0": {
+ "metadata": {
+ "vfModuleModelCustomizationUUID": "11ddac51-30e3-4a3f-92eb-2eb99c2cb288",
+ "vfModuleModelInvariantUUID": "02f70416-581e-4f00-bde1-d65e69af95c5",
+ "vfModuleModelName": "Vspinfra111601..base_vcpe_infra..module-0",
+ "vfModuleModelUUID": "88c78078-f1fd-4f73-bdd9-10420b0f6353",
+ "vfModuleModelVersion": "1"
+ },
+ "properties": {
+ "availability_zone_count": null,
+ "initial_count": 1,
+ "max_vf_module_instances": 1,
+ "min_vf_module_instances": 1,
+ "vf_module_description": null,
+ "vf_module_label": "base_vcpe_infra",
+ "vf_module_type": "Base",
+ "vfc_list": null,
+ "volume_group": false
+ },
+ "type": "org.openecomp.groups.VfModule"
+ }
+ },
+ Convert to
+ {
+ "modelType": "vfModule",
+ "modelInvariantId": "02f70416-581e-4f00-bde1-d65e69af95c5",
+ "modelVersionId": "88c78078-f1fd-4f73-bdd9-10420b0f6353",
+ "modelName": "Vspinfra111601..base_vcpe_infra..module-0",
+ "modelVersion": "1",
+ "modelCustomizationId": "11ddac51-30e3-4a3f-92eb-2eb99c2cb288",
+ "modelCustomizationName": "Vspinfra111601..base_vcpe_infra..module-0"
+ },
+ """
+ node_dic = svc_template['topology_template']['groups']
+ for node_name, v in node_dic.items():
+ if v['type'].startswith('org.openecomp.groups.VfModule'):
+ model = {
+ 'modelType': 'vfModule',
+ 'modelInvariantId': v['metadata']['vfModuleModelInvariantUUID'],
+ 'modelVersionId': v['metadata']['vfModuleModelUUID'],
+ 'modelName': v['metadata']['vfModuleModelName'],
+ 'modelVersion': v['metadata']['vfModuleModelVersion'],
+ 'modelCustomizationId': v['metadata']['vfModuleModelCustomizationUUID'],
+ 'modelCustomizationName': v['metadata']['vfModuleModelName']
+ }
+ self.vfmodule_models.append(model)
+ self.logger.info('Parser found a VF module: ' + model['modelCustomizationName'])
+
+ def parse_service_yaml(self, filename):
+ # clean up
+ self.svc_model = {}
+ self.net_models = [] # there could be multiple networks
+        self.vnf_models = []  # this version only supports a single VNF in the service template
+        self.vfmodule_models = []  # this version only supports a single VF module in the service template
+
+ svc_template = yaml.load(file(filename, 'r'))
+ self.get_service_model_info(svc_template)
+ self.get_vnf_and_network_model_info(svc_template)
+ self.get_vfmodule_model_info(svc_template)
+
+ return True
+
+ def parse_csar(self, csar_file):
+ yaml_file = self.get_service_yaml_from_csar(csar_file)
+ if yaml_file != '':
+ return self.parse_service_yaml(yaml_file)
+
+ def print_models(self):
+ print('---------Service Model----------')
+ print(json.dumps(self.svc_model, indent=2, sort_keys=True))
+
+ print('---------Network Model(s)----------')
+ for model in self.net_models:
+ print(json.dumps(model, indent=2, sort_keys=True))
+
+ print('---------VNF Model(s)----------')
+ for model in self.vnf_models:
+ print(json.dumps(model, indent=2, sort_keys=True))
+
+ print('---------VF Module Model(s)----------')
+ for model in self.vfmodule_models:
+ print(json.dumps(model, indent=2, sort_keys=True))
+
+ def test(self):
+ self.parse_csar('csar/service-Vcpesvcinfra111601-csar.csar')
+ self.print_models()
diff --git a/test/vcpe/get_info.py b/test/vcpe/get_info.py
new file mode 100755
index 0000000..5b0c687
--- /dev/null
+++ b/test/vcpe/get_info.py
@@ -0,0 +1,26 @@
+#! /usr/bin/python
+
+import time
+import logging
+import json
+import mysql.connector
+import ipaddress
+import re
+import sys
+import base64
+from vcpecommon import *
+import preload
+import vcpe_custom_service
+
+
+logging.basicConfig(level=logging.INFO, format='%(message)s')
+
+vcpecommon = VcpeCommon()
+nodes=['brg', 'bng', 'mux', 'dhcp']
+hosts = vcpecommon.get_vm_ip(nodes)
+print(json.dumps(hosts, indent=4, sort_keys=True))
+
+
+
+
+
diff --git a/test/vcpe/healthcheck.py b/test/vcpe/healthcheck.py
new file mode 100755
index 0000000..b94848e
--- /dev/null
+++ b/test/vcpe/healthcheck.py
@@ -0,0 +1,30 @@
+#! /usr/bin/python
+
+import logging
+import json
+from vcpecommon import *
+import commands
+
+
+logging.basicConfig(level=logging.INFO, format='%(message)s')
+common = VcpeCommon()
+
+print('Checking vGMUX REST API from SDNC')
+cmd = 'curl -u admin:admin -X GET http://10.0.101.21:8183/restconf/config/ietf-interfaces:interfaces'
+ret = commands.getstatusoutput("ssh -i onap_dev root@sdnc '{0}'".format(cmd))
+sz = ret[-1].split('\n')[-1]
+print('\n')
+print(sz)
+
+print('Checking vBRG REST API from SDNC')
+cmd = 'curl -u admin:admin -X GET http://10.3.0.2:8183/restconf/config/ietf-interfaces:interfaces'
+ret = commands.getstatusoutput("ssh -i onap_dev root@sdnc '{0}'".format(cmd))
+sz = ret[-1].split('\n')[-1]
+print('\n')
+print(sz)
+
+print('Checking SDNC DB for vBRG MAC address')
+mac = common.get_brg_mac_from_sdnc()
+print(mac)
+
+
diff --git a/test/vcpe/loop.py b/test/vcpe/loop.py
new file mode 100755
index 0000000..ad58797
--- /dev/null
+++ b/test/vcpe/loop.py
@@ -0,0 +1,37 @@
+#! /usr/bin/python
+
+import time
+import logging
+import json
+import mysql.connector
+import ipaddress
+import re
+import sys
+import base64
+from vcpecommon import *
+import preload
+import commands
+import vcpe_custom_service
+
+
+logging.basicConfig(level=logging.INFO, format='%(message)s')
+
+cpecommon = VcpeCommon(['mux'])
+custom = vcpe_custom_service.CustomService(cpecommon)
+
+nodes = ['mux']
+hosts = cpecommon.get_vm_ip(nodes)
+
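+# NOTE: exit() below stops this script after clearing the VES settings; remove it to also re-enable the collector and packet-loss reporting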
+cpecommon.del_vgmux_ves_mode()
+time.sleep(2)
+cpecommon.del_vgmux_ves_collector()
+exit()
+
+time.sleep(2)
+logging.info('Setting vGMUX DCAE collector IP address')
+cpecommon.set_vgmux_ves_collector()
+time.sleep(2)
+vgmux_vnf_name = cpecommon.load_vgmux_vnf_name()
+logging.info('vGMUX VNF instance name is %s', vgmux_vnf_name)
+logging.info('Letting vGMUX report packet loss to DCAE')
+cpecommon.set_vgmux_packet_loss_rate(55, vgmux_vnf_name)
diff --git a/test/vcpe/preload.py b/test/vcpe/preload.py
new file mode 100755
index 0000000..c4efafd
--- /dev/null
+++ b/test/vcpe/preload.py
@@ -0,0 +1,216 @@
+#! /usr/bin/python
+
+import requests
+import json
+import sys
+from datetime import datetime
+from vcpecommon import *
+import csar_parser
+import logging
+import base64
+
+
+class Preload:
+ def __init__(self, vcpecommon):
+ self.logger = logging.getLogger(__name__)
+ self.vcpecommon = vcpecommon
+
+ def replace(self, sz, replace_dict):
+ for old_string, new_string in replace_dict.items():
+ sz = sz.replace(old_string, new_string)
+ if self.vcpecommon.template_variable_symbol in sz:
+            self.logger.error('Error! Cannot find replacement value(s) for: ' + sz)
+ return sz
+
+ def generate_json(self, template_file, replace_dict):
+ with open(template_file) as json_input:
+ json_data = json.load(json_input)
+ stk = [json_data]
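+        # iteratively walk the nested dicts and lists, substituting template variables found in string values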
+ while len(stk) > 0:
+ data = stk.pop()
+ for k, v in data.items():
+ if type(v) is dict:
+ stk.append(v)
+ elif type(v) is list:
+ stk.extend(v)
+ elif type(v) is str or type(v) is unicode:
+ if self.vcpecommon.template_variable_symbol in v:
+ data[k] = self.replace(v, replace_dict)
+ else:
+                    self.logger.warning('Unexpected value type in template %s, value: %s', template_file, v)
+ return json_data
+
+ def reset_sniro(self):
+ self.logger.debug('Clearing SNIRO data')
+ r = requests.post(self.vcpecommon.sniro_url + '/reset', headers=self.vcpecommon.sniro_headers)
+ if 2 != r.status_code / 100:
+ self.logger.debug(r.content)
+            self.logger.error('Clearing SNIRO data failed.')
+ sys.exit()
+
+ def preload_sniro(self, template_sniro_data, template_sniro_request, tunnelxconn_ar_name, vgw_name, vbrg_ar_name,
+ vgmux_svc_instance_uuid, vbrg_svc_instance_uuid):
+ self.reset_sniro()
+ self.logger.info('Preloading SNIRO for homing service')
+ replace_dict = {'${tunnelxconn_ar_name}': tunnelxconn_ar_name,
+ '${vgw_name}': vgw_name,
+ '${brg_ar_name}': vbrg_ar_name,
+ '${vgmux_svc_instance_uuid}': vgmux_svc_instance_uuid,
+ '${vbrg_svc_instance_uuid}': vbrg_svc_instance_uuid
+ }
+ sniro_data = self.generate_json(template_sniro_data, replace_dict)
+ self.logger.debug('SNIRO data:')
+ self.logger.debug(json.dumps(sniro_data, indent=4, sort_keys=True))
+
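+        # embed the SNIRO data as a base64 string, together with the robot VM IP, into the request template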
+ base64_sniro_data = base64.b64encode(json.dumps(sniro_data))
+ self.logger.debug('SNIRO data: 64')
+ self.logger.debug(base64_sniro_data)
+ replace_dict = {'${base64_sniro_data}': base64_sniro_data, '${sniro_ip}': self.vcpecommon.hosts['robot']}
+ sniro_request = self.generate_json(template_sniro_request, replace_dict)
+ self.logger.debug('SNIRO request:')
+ self.logger.debug(json.dumps(sniro_request, indent=4, sort_keys=True))
+
+ r = requests.post(self.vcpecommon.sniro_url, headers=self.vcpecommon.sniro_headers, json=sniro_request)
+ if 2 != r.status_code / 100:
+ response = r.json()
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ self.logger.error('SNIRO preloading failed.')
+ sys.exit()
+
+ return True
+
+ def preload_network(self, template_file, network_role, subnet_start_ip, subnet_gateway, common_dict, name_suffix):
+ """
+ :param template_file:
+ :param network_role: cpe_signal, cpe_public, brg_bng, bng_mux, mux_gw
+ :param subnet_start_ip:
+ :param subnet_gateway:
+ :param name_suffix: e.g. '201711201311'
+ :return:
+ """
+ network_name = '_'.join([self.vcpecommon.instance_name_prefix['network'], network_role.lower(), name_suffix])
+ subnet_name = self.vcpecommon.network_name_to_subnet_name(network_name)
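+        # record the generated network and subnet names so later vGW and VF module preloads can reference them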
+ common_dict['${' + network_role+'_net}'] = network_name
+ common_dict['${' + network_role+'_subnet}'] = subnet_name
+ replace_dict = {'${network_role}': network_role,
+ '${service_type}': 'vCPE',
+ '${network_type}': 'Generic NeutronNet',
+ '${network_name}': network_name,
+ '${subnet_start_ip}': subnet_start_ip,
+ '${subnet_gateway}': subnet_gateway
+ }
+ self.logger.info('Preloading network ' + network_role)
+ return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_network_url)
+
+ def preload(self, template_file, replace_dict, url):
+ json_data = self.generate_json(template_file, replace_dict)
+ self.logger.debug(json.dumps(json_data, indent=4, sort_keys=True))
+ r = requests.post(url, headers=self.vcpecommon.sdnc_headers, auth=self.vcpecommon.sdnc_userpass, json=json_data)
+ response = r.json()
+ if int(response.get('output', {}).get('response-code', 0)) != 200:
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ self.logger.error('Preloading failed.')
+ return False
+ return True
+
+    def preload_vgw(self, template_file, brg_mac, common_dict, name_suffix):
+ replace_dict = {'${brg_mac}': brg_mac,
+ '${suffix}': name_suffix
+ }
+        replace_dict.update(common_dict)
+ self.logger.info('Preloading vGW')
+ return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_vnf_url)
+
+ def preload_vfmodule(self, template_file, service_instance_id, vnf_model, vfmodule_model, common_dict, name_suffix):
+ """
+ :param template_file:
+ :param service_instance_id:
+ :param vnf_model: parsing results from csar_parser
+ :param vfmodule_model: parsing results from csar_parser
+ :param common_dict:
+ :param name_suffix:
+ :return:
+ """
+
+ # examples:
+ # vfmodule_model['modelCustomizationName']: "Vspinfra111601..base_vcpe_infra..module-0",
+ # vnf_model['modelCustomizationName']: "vspinfra111601 0",
+
+ vfmodule_name = '_'.join([self.vcpecommon.instance_name_prefix['vfmodule'],
+ vfmodule_model['modelCustomizationName'].split('..')[0].lower(), name_suffix])
+
+ # vnf_type and generic_vnf_type are identical
+ replace_dict = {'${vnf_type}': vfmodule_model['modelCustomizationName'],
+ '${generic_vnf_type}': vfmodule_model['modelCustomizationName'],
+ '${service_type}': service_instance_id,
+ '${generic_vnf_name}': vnf_model['modelCustomizationName'],
+ '${vnf_name}': vfmodule_name,
+ '${suffix}': name_suffix}
+ replace_dict.update(common_dict)
+ self.logger.info('Preloading VF Module ' + vfmodule_name)
+ return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_vnf_url)
+
+ def preload_all_networks(self, template_file, name_suffix):
+ common_dict = {'${' + k + '}': v for k, v in self.vcpecommon.common_preload_config.items()}
+ for network, v in self.vcpecommon.preload_network_config.items():
+ subnet_start_ip, subnet_gateway_ip = v
+ if not self.preload_network(template_file, network, subnet_start_ip, subnet_gateway_ip,
+ common_dict, name_suffix):
+ return None
+ return common_dict
+
+ def test(self):
+        # this is for testing purposes only
+ name_suffix = datetime.now().strftime('%Y%m%d%H%M')
+ vcpecommon = VcpeCommon()
+ preloader = Preload(vcpecommon)
+
+ network_dict = {'${' + k + '}': v for k, v in self.vcpecommon.common_preload_config.items()}
+ template_file = 'preload_templates/template.network.json'
+ for k, v in self.vcpecommon.preload_network_config.items():
+ if not preloader.preload_network(template_file, k, v[0], v[1], network_dict, name_suffix):
+ break
+
+ print('---------------------------------------------------------------')
+ print('Network related replacement dictionary:')
+ print(json.dumps(network_dict, indent=4, sort_keys=True))
+ print('---------------------------------------------------------------')
+
+ keys = ['infra', 'bng', 'gmux', 'brg']
+ for key in keys:
+ csar_file = self.vcpecommon.find_file(key, 'csar', 'csar')
+ template_file = self.vcpecommon.find_file(key, 'json', 'preload_templates')
+ if csar_file and template_file:
+ parser = csar_parser.CsarParser()
+ parser.parse_csar(csar_file)
+ service_instance_id = 'test112233'
+ preloader.preload_vfmodule(template_file, service_instance_id, parser.vnf_models[0],
+ parser.vfmodule_models[0], network_dict, name_suffix)
+
+ def test_sniro(self):
+ template_sniro_data = self.vcpecommon.find_file('sniro_data', 'json', 'preload_templates')
+ template_sniro_request = self.vcpecommon.find_file('sniro_request', 'json', 'preload_templates')
+
+ vcperescust_csar = self.vcpecommon.find_file('rescust', 'csar', 'csar')
+ parser = csar_parser.CsarParser()
+ parser.parse_csar(vcperescust_csar)
+ tunnelxconn_ar_name = None
+ brg_ar_name = None
+ vgw_name = None
+ for model in parser.vnf_models:
+ if 'tunnel' in model['modelCustomizationName']:
+ tunnelxconn_ar_name = model['modelCustomizationName']
+ elif 'brg' in model['modelCustomizationName']:
+ brg_ar_name = model['modelCustomizationName']
+ elif 'vgw' in model['modelCustomizationName']:
+ vgw_name = model['modelCustomizationName']
+
+ if not (tunnelxconn_ar_name and brg_ar_name and vgw_name):
+ self.logger.error('Cannot find all names from %s.', vcperescust_csar)
+ sys.exit()
+
+ vgmux_svc_instance_uuid = '88888888888888'
+ vbrg_svc_instance_uuid = '999999999999999'
+
+ self.preload_sniro(template_sniro_data, template_sniro_request, tunnelxconn_ar_name, vgw_name, brg_ar_name,
+ vgmux_svc_instance_uuid, vbrg_svc_instance_uuid)
diff --git a/test/vcpe/soutils.py b/test/vcpe/soutils.py
new file mode 100755
index 0000000..cc82068
--- /dev/null
+++ b/test/vcpe/soutils.py
@@ -0,0 +1,318 @@
+#! /usr/bin/python
+
+import sys
+import logging
+import requests
+import json
+from datetime import datetime
+import progressbar
+import time
+import csar_parser
+import preload
+from vcpecommon import *
+
+
+class SoUtils:
+ def __init__(self, vcpecommon, api_version):
+ """
+ :param vcpecommon:
+ :param api_version: must be 'v4' or 'v5'
+ """
+ self.logger = logging.getLogger(__name__)
+ self.vcpecommon = vcpecommon
+ if api_version not in self.vcpecommon.so_req_api_url:
+ self.logger.error('Incorrect SO API version: %s', api_version)
+ sys.exit()
+ self.service_req_api_url = self.vcpecommon.so_req_api_url[api_version]
+
+ def submit_create_req(self, req_json, req_type, service_instance_id=None, vnf_instance_id=None):
+ """
+ POST {serverRoot}/serviceInstances/v4
+ POST {serverRoot}/serviceInstances/v4/{serviceInstanceId}/vnfs
+ POST {serverRoot}/serviceInstances/v4/{serviceInstanceId}/networks
+ POST {serverRoot}/serviceInstances/v4/{serviceInstanceId}/vnfs/{vnfInstanceId}/vfModules
+ :param req_json:
+ :param service_instance_id: this is required only for networks, vnfs, and vf modules
+ :param req_type:
+ :param vnf_instance_id:
+ :return: req_id, instance_id
+ """
+ if req_type == 'service':
+ url = self.service_req_api_url
+ elif req_type == 'vnf':
+ url = '/'.join([self.service_req_api_url, service_instance_id, 'vnfs'])
+ elif req_type == 'network':
+ url = '/'.join([self.service_req_api_url, service_instance_id, 'networks'])
+ elif req_type == 'vfmodule':
+ url = '/'.join([self.service_req_api_url, service_instance_id, 'vnfs', vnf_instance_id, 'vfModules'])
+ else:
+ self.logger.error('Invalid request type: {0}. Can only be service/vnf/network/vfmodule'.format(req_type))
+ return None, None
+
+ r = requests.post(url, headers=self.vcpecommon.so_headers, auth=self.vcpecommon.so_userpass, json=req_json)
+ response = r.json()
+
+ self.logger.debug('---------------------------------------------------------------')
+ self.logger.debug('------- Creation request submitted to SO, got response --------')
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ self.logger.debug('---------------------------------------------------------------')
+ req_id = response.get('requestReferences', {}).get('requestId', '')
+ instance_id = response.get('requestReferences', {}).get('instanceId', '')
+ return req_id, instance_id
+
+ def check_progress(self, req_id, eta=0, interval=5):
+ if not req_id:
+            self.logger.error('Error when checking SO request progress, invalid request ID: {0}'.format(req_id))
+ return False
+ duration = 0.0
+ bar = progressbar.ProgressBar(redirect_stdout=True)
+ url = self.vcpecommon.so_check_progress_api_url + '/' + req_id
+
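+        # poll SO until the request leaves IN_PROGRESS; with a non-zero eta, show a time-based estimate (capped at 95%) instead of SO's reported percentProgress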
+ while True:
+ time.sleep(interval)
+ r = requests.get(url, headers=self.vcpecommon.so_headers, auth=self.vcpecommon.so_userpass)
+ response = r.json()
+
+ duration += interval
+ if eta > 0:
+ percentage = min(95, 100 * duration / eta)
+ else:
+ percentage = int(response['request']['requestStatus']['percentProgress'])
+
+ if response['request']['requestStatus']['requestState'] == 'IN_PROGRESS':
+ self.logger.debug('------------------Request Status-------------------------------')
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ bar.update(percentage)
+ else:
+ self.logger.debug('---------------------------------------------------------------')
+ self.logger.debug('----------------- Creation Request Results --------------------')
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ self.logger.debug('---------------------------------------------------------------')
+ flag = response['request']['requestStatus']['requestState'] == 'COMPLETE'
+ if not flag:
+ self.logger.error('Request failed.')
+ self.logger.error(json.dumps(response, indent=4, sort_keys=True))
+ bar.update(100)
+ bar.finish()
+ return flag
+
+ def add_req_info(self, req_details, instance_name, product_family_id=None):
+ req_details['requestInfo'] = {
+ 'instanceName': instance_name,
+ 'source': 'VID',
+ 'suppressRollback': 'true',
+ 'requestorId': 'vCPE-Robot'
+ }
+ if product_family_id:
+ req_details['requestInfo']['productFamilyId'] = product_family_id
+
+ def add_related_instance(self, req_details, instance_id, instance_model):
+ instance = {"instanceId": instance_id, "modelInfo": instance_model}
+ if 'relatedInstanceList' not in req_details:
+ req_details['relatedInstanceList'] = [{"relatedInstance": instance}]
+ else:
+ req_details['relatedInstanceList'].append({"relatedInstance": instance})
+
+ def generate_vnf_or_network_request(self, req_type, instance_name, vnf_or_network_model, service_instance_id,
+ service_model):
+ req_details = {
+ 'modelInfo': vnf_or_network_model,
+ 'cloudConfiguration': {"lcpCloudRegionId": self.vcpecommon.os_region_name,
+ "tenantId": self.vcpecommon.os_tenant_id},
+ 'requestParameters': {"userParams": []}
+ }
+ self.add_req_info(req_details, instance_name, self.vcpecommon.product_family_id)
+ self.add_related_instance(req_details, service_instance_id, service_model)
+ return {'requestDetails': req_details}
+
+ def generate_vfmodule_request(self, instance_name, vfmodule_model, service_instance_id,
+ service_model, vnf_instance_id, vnf_model):
+ req_details = {
+ 'modelInfo': vfmodule_model,
+ 'cloudConfiguration': {"lcpCloudRegionId": self.vcpecommon.os_region_name,
+ "tenantId": self.vcpecommon.os_tenant_id},
+ 'requestParameters': {"usePreload": 'true'}
+ }
+ self.add_req_info(req_details, instance_name, self.vcpecommon.product_family_id)
+ self.add_related_instance(req_details, service_instance_id, service_model)
+ self.add_related_instance(req_details, vnf_instance_id, vnf_model)
+ return {'requestDetails': req_details}
+
+ def generate_service_request(self, instance_name, model):
+ req_details = {
+ 'modelInfo': model,
+ 'subscriberInfo': {'globalSubscriberId': self.vcpecommon.global_subscriber_id},
+ 'requestParameters': {
+ "userParams": [],
+ "subscriptionServiceType": "vCPE",
+ "aLaCarte": 'true'
+ }
+ }
+ self.add_req_info(req_details, instance_name)
+ return {'requestDetails': req_details}
+
+ def generate_custom_service_request(self, instance_name, model, brg_mac):
+ req_details = {
+ 'modelInfo': model,
+ 'subscriberInfo': {'subscriberName': 'Kaneohe',
+ 'globalSubscriberId': self.vcpecommon.global_subscriber_id},
+ 'cloudConfiguration': {"lcpCloudRegionId": self.vcpecommon.os_region_name,
+ "tenantId": self.vcpecommon.os_tenant_id},
+ 'requestParameters': {
+ "userParams": [
+ {
+ 'name': 'BRG_WAN_MAC_Address',
+ 'value': brg_mac
+ }
+ ],
+ "subscriptionServiceType": "vCPE",
+ 'aLaCarte': 'false'
+ }
+ }
+ self.add_req_info(req_details, instance_name, self.vcpecommon.custom_product_family_id)
+ return {'requestDetails': req_details}
+
+ def create_custom_service(self, csar_file, brg_mac, name_suffix=None):
+ parser = csar_parser.CsarParser()
+ if not parser.parse_csar(csar_file):
+ return False
+
+        # default name suffix: _yyyymmddhhmm
+ if not name_suffix:
+ name_suffix = '_' + datetime.now().strftime('%Y%m%d%H%M')
+
+ # create service
+ instance_name = '_'.join([self.vcpecommon.instance_name_prefix['service'],
+ parser.svc_model['modelName'], name_suffix])
+ instance_name = instance_name.lower()
+ req = self.generate_custom_service_request(instance_name, parser.svc_model, brg_mac)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ self.logger.info('Creating custom service {0}.'.format(instance_name))
+ req_id, svc_instance_id = self.submit_create_req(req, 'service')
+ if not self.check_progress(req_id, 140):
+ return False
+ return True
+
+ def wait_for_aai(self, node_type, uuid):
+ self.logger.info('Waiting for AAI traversal to complete...')
+ bar = progressbar.ProgressBar()
+ for i in range(30):
+ time.sleep(1)
+ bar.update(i*100.0/30)
+ if self.vcpecommon.is_node_in_aai(node_type, uuid):
+ bar.update(100)
+ bar.finish()
+ return
+
+ self.logger.error("AAI traversal didn't finish in 30 seconds. Something is wrong. Type {0}, UUID {1}".format(
+ node_type, uuid))
+ sys.exit()
+
+ def create_entire_service(self, csar_file, vnf_template_file, preload_dict, name_suffix, heatbridge=False):
+ """
+ :param csar_file:
+ :param vnf_template_file:
+ :param preload_dict:
+        :param name_suffix:
+        :param heatbridge: if True, run the Robot heatbridge script after VF module creation to add vServer data to AAI
+        :return: service instance UUID
+ """
+ self.logger.info('\n----------------------------------------------------------------------------------')
+ self.logger.info('Start to create entire service defined in csar: {0}'.format(csar_file))
+ parser = csar_parser.CsarParser()
+ self.logger.info('Parsing csar ...')
+ if not parser.parse_csar(csar_file):
+ self.logger.error('Cannot parse csar: {0}'.format(csar_file))
+ return None
+
+ # create service
+ instance_name = '_'.join([self.vcpecommon.instance_name_prefix['service'],
+ parser.svc_model['modelName'], name_suffix])
+ instance_name = instance_name.lower()
+ self.logger.info('Creating service instance: {0}.'.format(instance_name))
+ req = self.generate_service_request(instance_name, parser.svc_model)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ req_id, svc_instance_id = self.submit_create_req(req, 'service')
+ if not self.check_progress(req_id, eta=2, interval=1):
+ return None
+
+ # wait for AAI to complete traversal
+ self.wait_for_aai('service', svc_instance_id)
+
+ # create networks
+ for model in parser.net_models:
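+            # rename 'mux_vg' to 'mux_gw' so the network name matches the mux_gw role used in the preload configuration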
+ base_name = model['modelCustomizationName'].lower().replace('mux_vg', 'mux_gw')
+ network_name = '_'.join([self.vcpecommon.instance_name_prefix['network'], base_name, name_suffix])
+ network_name = network_name.lower()
+ self.logger.info('Creating network: ' + network_name)
+ req = self.generate_vnf_or_network_request('network', network_name, model, svc_instance_id,
+ parser.svc_model)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ req_id, net_instance_id = self.submit_create_req(req, 'network', svc_instance_id)
+ if not self.check_progress(req_id, eta=20):
+ return None
+
+ self.logger.info('Changing subnet name to ' + self.vcpecommon.network_name_to_subnet_name(network_name))
+ self.vcpecommon.set_network_name(network_name)
+ subnet_name_changed = False
+ for i in range(20):
+ time.sleep(3)
+ if self.vcpecommon.set_subnet_name(network_name):
+ subnet_name_changed = True
+ break
+
+ if not subnet_name_changed:
+ self.logger.error('Failed to change subnet name for ' + network_name)
+ return None
+
+
+ vnf_model = None
+ vnf_instance_id = None
+ # create VNF
+ if len(parser.vnf_models) == 1:
+ vnf_model = parser.vnf_models[0]
+ vnf_instance_name = '_'.join([self.vcpecommon.instance_name_prefix['vnf'],
+ vnf_model['modelCustomizationName'].split(' ')[0], name_suffix])
+ vnf_instance_name = vnf_instance_name.lower()
+ self.logger.info('Creating VNF: ' + vnf_instance_name)
+ req = self.generate_vnf_or_network_request('vnf', vnf_instance_name, vnf_model, svc_instance_id,
+ parser.svc_model)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ req_id, vnf_instance_id = self.submit_create_req(req, 'vnf', svc_instance_id)
+ if not self.check_progress(req_id, eta=2, interval=1):
+ self.logger.error('Failed to create VNF {0}.'.format(vnf_instance_name))
+                return None
+
+ # wait for AAI to complete traversal
+ if not vnf_instance_id:
+ self.logger.error('No VNF instance ID returned!')
+ sys.exit()
+ self.wait_for_aai('vnf', vnf_instance_id)
+
+ preloader = preload.Preload(self.vcpecommon)
+ preloader.preload_vfmodule(vnf_template_file, svc_instance_id, parser.vnf_models[0], parser.vfmodule_models[0],
+ preload_dict, name_suffix)
+ # create VF Module
+ if len(parser.vfmodule_models) == 1:
+ if not vnf_instance_id or not vnf_model:
+ self.logger.error('Invalid VNF instance ID or VNF model!')
+ sys.exit()
+
+ model = parser.vfmodule_models[0]
+ vfmodule_instance_name = '_'.join([self.vcpecommon.instance_name_prefix['vfmodule'],
+ model['modelCustomizationName'].split('..')[0], name_suffix])
+ vfmodule_instance_name = vfmodule_instance_name.lower()
+ self.logger.info('Creating VF Module: ' + vfmodule_instance_name)
+ req = self.generate_vfmodule_request(vfmodule_instance_name, model, svc_instance_id, parser.svc_model,
+ vnf_instance_id, vnf_model)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ req_id, vfmodule_instance_id = self.submit_create_req(req, 'vfmodule', svc_instance_id, vnf_instance_id)
+ if not self.check_progress(req_id, eta=70, interval=5):
+ self.logger.error('Failed to create VF Module {0}.'.format(vfmodule_instance_name))
+ return None
+
+ # run heatbridge
+ if heatbridge:
+ self.vcpecommon.headbridge(vfmodule_instance_name, svc_instance_id)
+ self.vcpecommon.save_vgmux_vnf_name(vnf_instance_name)
+
+ return svc_instance_id
diff --git a/test/vcpe/vcpe.py b/test/vcpe/vcpe.py
new file mode 100755
index 0000000..7de86ae
--- /dev/null
+++ b/test/vcpe/vcpe.py
@@ -0,0 +1,207 @@
+#! /usr/bin/python
+import sys
+import logging
+from vcpecommon import *
+import soutils
+from datetime import datetime
+import preload
+import vcpe_custom_service
+import csar_parser
+import config_sdnc_so
+
+
+def config_sniro(vcpecommon, vgmux_svc_instance_uuid, vbrg_svc_instance_uuid):
+ logger = logging.getLogger(__name__)
+
+ logger.info('\n----------------------------------------------------------------------------------')
+ logger.info('Start to config SNIRO homing emulator')
+
+ preloader = preload.Preload(vcpecommon)
+ template_sniro_data = vcpecommon.find_file('sniro_data', 'json', 'preload_templates')
+ template_sniro_request = vcpecommon.find_file('sniro_request', 'json', 'preload_templates')
+
+ vcperescust_csar = vcpecommon.find_file('rescust', 'csar', 'csar')
+ parser = csar_parser.CsarParser()
+ parser.parse_csar(vcperescust_csar)
+ tunnelxconn_ar_name = None
+ brg_ar_name = None
+ vgw_name = None
+ for model in parser.vnf_models:
+ if 'tunnel' in model['modelCustomizationName']:
+ tunnelxconn_ar_name = model['modelCustomizationName']
+ elif 'brg' in model['modelCustomizationName']:
+ brg_ar_name = model['modelCustomizationName']
+ elif 'vgw' in model['modelCustomizationName']:
+ vgw_name = model['modelCustomizationName']
+
+ if not (tunnelxconn_ar_name and brg_ar_name and vgw_name):
+ logger.error('Cannot find all names from %s.', vcperescust_csar)
+ sys.exit()
+
+ preloader.preload_sniro(template_sniro_data, template_sniro_request, tunnelxconn_ar_name, vgw_name, brg_ar_name,
+ vgmux_svc_instance_uuid, vbrg_svc_instance_uuid)
+
+
+def create_one_service(vcpecommon, csar_file, vnf_template_file, preload_dict, suffix, heatbridge=False):
+ """
+ :return: service instance UUID
+ """
+ so = soutils.SoUtils(vcpecommon, 'v4')
+ return so.create_entire_service(csar_file, vnf_template_file, preload_dict, suffix, heatbridge)
+
+def deploy_brg_only():
+ logging.basicConfig(level=logging.INFO, format='%(message)s')
+ logger = logging.getLogger(__name__)
+
+ vcpecommon = VcpeCommon()
+ preload_dict = vcpecommon.load_preload_data()
+ name_suffix = preload_dict['${brg_bng_net}'].split('_')[-1]
+
+    # create the vBRG service only (same flow as deploy_infra, restricted to the 'brg' keyword)
+ svc_instance_uuid = vcpecommon.load_object(vcpecommon.svc_instance_uuid_file)
+ for keyword in ['brg']:
+ heatbridge = 'gmux' == keyword
+ csar_file = vcpecommon.find_file(keyword, 'csar', 'csar')
+ vnf_template_file = vcpecommon.find_file(keyword, 'json', 'preload_templates')
+ svc_instance_uuid[keyword] = create_one_service(vcpecommon, csar_file, vnf_template_file, preload_dict,
+ name_suffix, heatbridge)
+ if not svc_instance_uuid[keyword]:
+ sys.exit()
+
+ # Setting up SNIRO
+ config_sniro(vcpecommon, svc_instance_uuid['gmux'], svc_instance_uuid['brg'])
+
+def deploy_infra():
+ logging.basicConfig(level=logging.INFO, format='%(message)s')
+ logger = logging.getLogger(__name__)
+
+ vcpecommon = VcpeCommon()
+
+ # preload all networks
+ network_template = vcpecommon.find_file('network', 'json', 'preload_templates')
+ name_suffix = datetime.now().strftime('%Y%m%d%H%M')
+ preloader = preload.Preload(vcpecommon)
+ preload_dict = preloader.preload_all_networks(network_template, name_suffix)
+ logger.debug('Initial preload dictionary:')
+ logger.debug(json.dumps(preload_dict, indent=4, sort_keys=True))
+ if not preload_dict:
+ logger.error("Failed to preload networks.")
+ sys.exit()
+ vcpecommon.save_preload_data(preload_dict)
+
+ # create multiple services based on the pre-determined order
+ svc_instance_uuid = {}
+ for keyword in ['infra', 'bng', 'gmux', 'brg']:
+ heatbridge = 'gmux' == keyword
+ csar_file = vcpecommon.find_file(keyword, 'csar', 'csar')
+ vnf_template_file = vcpecommon.find_file(keyword, 'json', 'preload_templates')
+ svc_instance_uuid[keyword] = create_one_service(vcpecommon, csar_file, vnf_template_file, preload_dict,
+ name_suffix, heatbridge)
+ if not svc_instance_uuid[keyword]:
+ sys.exit()
+
+ vcpecommon.save_object(svc_instance_uuid, vcpecommon.svc_instance_uuid_file)
+ # Setting up SNIRO
+ config_sniro(vcpecommon, svc_instance_uuid['gmux'], svc_instance_uuid['brg'])
+
+ print('----------------------------------------------------------------------------------------------------')
+ print('Congratulations! The following have been completed correctly:')
+ print(' - Infrastructure Service Instantiation: ')
+ print(' * 4 VMs: DHCP, AAA, DNS, Web Server')
+ print(' * 2 Networks: CPE_PUBLIC, CPE_SIGNAL')
+ print(' - vBNG Service Instantiation: ')
+ print(' * 1 VM: vBNG')
+ print(' * 2 Networks: BRG_BNG, BNG_MUX')
+ print(' - vGMUX Service Instantiation: ')
+ print(' * 1 VM: vGMUX')
+ print(' * 1 Network: MUX_GW')
+ print(' - vBRG Service Instantiation: ')
+ print(' * 1 VM: vBRG')
+ print(' - Adding vGMUX vServer information to AAI.')
+ print(' - SNIRO Homing Emulator configuration.')
+
+
+def deploy_custom_service():
+ nodes = ['brg', 'mux']
+ vcpecommon = VcpeCommon(nodes)
+ custom_service = vcpe_custom_service.CustomService(vcpecommon)
+
+ # clean up
+ host_dic = {k: vcpecommon.hosts[k] for k in nodes}
+ if not vcpecommon.delete_vxlan_interfaces(host_dic):
+ sys.exit()
+
+ custom_service.clean_up_sdnc()
+ custom_service.del_all_vgw_stacks(vcpecommon.vgw_name_keyword)
+
+ # create new service
+ csar_file = vcpecommon.find_file('rescust', 'csar', 'csar')
+ vgw_template_file = vcpecommon.find_file('vgw', 'json', 'preload_templates')
+ preload_dict = vcpecommon.load_preload_data()
+ custom_service.create_custom_service(csar_file, vgw_template_file, preload_dict)
+
+
+def closed_loop(lossrate=0):
+ if lossrate > 0:
+        while 'y' != raw_input('Please enter the "drools" docker container in the Policy VM and run "policy stop". Then enter y here: ').lower():
+ continue
+ nodes = ['brg', 'mux']
+ logger = logging.getLogger('__name__')
+ vcpecommon = VcpeCommon(nodes)
+ logger.info('Cleaning up vGMUX data reporting settings')
+ vcpecommon.del_vgmux_ves_mode()
+ time.sleep(2)
+ vcpecommon.del_vgmux_ves_collector()
+
+    logger.info('Starting vGMUX data reporting to DCAE')
+ time.sleep(2)
+ vcpecommon.set_vgmux_ves_collector()
+
+ logger.info('Setting vGMUX to report packet loss rate: %s', lossrate)
+ time.sleep(2)
+ vcpecommon.set_vgmux_packet_loss_rate(lossrate, vcpecommon.load_vgmux_vnf_name())
+ if lossrate > 0:
+        print('Please enter the "drools" docker container in the Policy VM and run "policy start". Then observe the vGMUX being restarted.')
+
+
+def init_so_sdnc():
+ logger = logging.getLogger('__name__')
+ vcpecommon = VcpeCommon()
+ config_sdnc_so.insert_customer_service_to_so(vcpecommon)
+ config_sdnc_so.insert_customer_service_to_sdnc(vcpecommon)
+
+
+if __name__ == '__main__':
+ logging.basicConfig(level=logging.INFO, format='%(message)s')
+
+ print('----------------------------------------------------------------------------------------------------')
+ print(' vcpe.py: Brief info about this program')
+# print(' vcpe.py sdc: Onboard VNFs, design and distribute vCPE services (under development)')
+ print(' vcpe.py init: Add customer service data to SDNC and SO DBs.')
+ print(' vcpe.py infra: Deploy infrastructure, including DHCP, AAA, DNS, Web Server, vBNG, vGMUX, vBRG.')
+ print(' vcpe.py customer: Deploy customer service, including vGW and VxLANs')
+ print(' vcpe.py loop: Test closed loop control')
+ print('----------------------------------------------------------------------------------------------------')
+
+ if len(sys.argv) != 2:
+ sys.exit()
+
+ if sys.argv[1] == 'sdc':
+ print('Under development')
+ elif sys.argv[1] == 'init':
+        if 'y' == raw_input('Ready to add customer service data to SDNC and SO DBs? This is needed only once. '
+ 'y/n: ').lower():
+ init_so_sdnc()
+ elif sys.argv[1] == 'infra':
+ if 'y' == raw_input('Ready to deploy infrastructure? y/n: ').lower():
+ deploy_infra()
+ elif sys.argv[1] == 'customer':
+ if 'y' == raw_input('Ready to deploy customer service? y/n: ').lower():
+ deploy_custom_service()
+ elif sys.argv[1] == 'loop':
+ closed_loop(22)
+ elif sys.argv[1] == 'noloss':
+ closed_loop(0)
+ elif sys.argv[1] == 'brg':
+ deploy_brg_only()
+
diff --git a/test/vcpe/vcpe_custom_service.py b/test/vcpe/vcpe_custom_service.py
new file mode 100755
index 0000000..d89129e
--- /dev/null
+++ b/test/vcpe/vcpe_custom_service.py
@@ -0,0 +1,80 @@
+#! /usr/bin/python
+
+import os
+import requests
+import time
+from vcpecommon import *
+from datetime import datetime
+import soutils
+import logging
+import preload
+import json
+
+
+class CustomService:
+ def __init__(self, vcpecommon):
+ self.logger = logging.getLogger(__name__)
+ self.vcpecommon = vcpecommon
+
+ # delete all vgw stacks
+ def del_all_vgw_stacks(self, keyword):
+ param = ' '.join([k + ' ' + v for k, v in self.vcpecommon.cloud.items()])
+ openstackcmd = 'openstack ' + param + ' '
+
+ stacks = os.popen(openstackcmd + 'stack list').read()
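+        # each row of the 'openstack stack list' table carries the stack name in its third '|'-separated field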
+ found = False
+ for stack_description in stacks.split('\n'):
+ if keyword in stack_description:
+ found = True
+ stack_name = stack_description.split('|')[2].strip()
+ cmd = openstackcmd + 'stack delete -y ' + stack_name
+ self.logger.info('Deleting ' + stack_name)
+ os.popen(cmd)
+
+ if not found:
+ self.logger.info('No vGW stack to delete')
+
+ # clean up SDNC
+ def clean_up_sdnc(self):
+ items = ['tunnelxconn-allotted-resources', 'brg-allotted-resources']
+ for res in items:
+ self.logger.info('Cleaning up ' + res + ' from SDNC')
+ requests.delete(self.vcpecommon.sdnc_ar_cleanup_url + res, auth=self.vcpecommon.sdnc_userpass)
+
+ def print_success_info(self, print_instructions=True, nodes=None):
+ if not nodes:
+ nodes = ['brg', 'mux', 'gw', 'web']
+ ip_dict = self.vcpecommon.get_vm_ip(nodes, self.vcpecommon.external_net_addr,
+ self.vcpecommon.external_net_prefix_len)
+
+ print(json.dumps(ip_dict, indent=4, sort_keys=True))
+ for node in ['brg', 'mux']:
+ print('VxLAN config in {0}:'.format(node))
+ self.vcpecommon.get_vxlan_interfaces(ip_dict[node], print_info=True)
+
+ print(json.dumps(ip_dict, indent=4, sort_keys=True))
+
+ if print_instructions:
+ print('----------------------------------------------------------------------------')
+ print('Custom service created successfully. See above for VxLAN configuration info.')
+            print('To test data plane connectivity, follow the steps below.')
+ print(' 1. ssh to vGW at {0}'.format(ip_dict['gw']))
+ print(' 2. Restart DHCP: systemctl restart isc-dhcp-server')
+ print(' 3. ssh to vBRG at {0}'.format(ip_dict['brg']))
+ print(' 4. Get IP from vGW: dhclient lstack')
+ print(' 5. Add route to Internet: ip route add 10.2.0.0/24 via 192.168.1.254 dev lstack')
+ print(' 6. ping the web server: ping {0}'.format('10.2.0.10'))
+ print(' 7. wget http://{0}'.format('10.2.0.10'))
+
+ def create_custom_service(self, csar_file, vgw_template_file, preload_dict=None):
+ name_suffix = datetime.now().strftime('%Y%m%d%H%M')
+ brg_mac = self.vcpecommon.get_brg_mac_from_sdnc()
+ # preload vGW
+ if preload_dict:
+ preloader = preload.Preload(self.vcpecommon)
+ preloader.preload_vgw(vgw_template_file, brg_mac, preload_dict, name_suffix)
+
+ # create service
+ so = soutils.SoUtils(self.vcpecommon, 'v5')
+ if so.create_custom_service(csar_file, brg_mac, name_suffix):
+ self.print_success_info()
diff --git a/test/vcpe/vcpecommon.py b/test/vcpe/vcpecommon.py
new file mode 100755
index 0000000..5b3e009
--- /dev/null
+++ b/test/vcpe/vcpecommon.py
@@ -0,0 +1,414 @@
+import json
+import logging
+import os
+import pickle
+import re
+import sys
+
+import ipaddress
+import mysql.connector
+import requests
+import commands
+import time
+
+
+class VcpeCommon:
+ #############################################################################################
+ # Start: configurations that you must change for a new ONAP installation
+ external_net_addr = '10.12.0.0'
+ external_net_prefix_len = 16
+ #############################################################################################
+ # set the openstack cloud access credentials here
+ cloud = {
+ '--os-auth-url': 'http://10.12.25.2:5000',
+ '--os-username': 'YOUR ID',
+ '--os-user-domain-id': 'default',
+ '--os-project-domain-id': 'default',
+ '--os-tenant-id': '087050388b204c73a3e418dd2c1fe30b',
+ '--os-region-name': 'RegionOne',
+ '--os-password': 'YOUR PASSWD',
+ '--os-project-domain-name': 'Integration-SB-01',
+ '--os-identity-api-version': '3'
+ }
+
+ common_preload_config = {
+ 'oam_onap_net': 'oam_onap_c4Uw',
+ 'oam_onap_subnet': 'oam_onap_c4Uw',
+ 'public_net': 'external',
+ 'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
+ }
+ # End: configurations that you must change for a new ONAP installation
+ #############################################################################################
+
+ template_variable_symbol = '${'
+ #############################################################################################
+ # preloading network config
+ # key=network role
+ # value = [subnet_start_ip, subnet_gateway_ip]
+ preload_network_config = {
+ 'cpe_public': ['10.2.0.2', '10.2.0.1'],
+ 'cpe_signal': ['10.4.0.2', '10.4.0.1'],
+ 'brg_bng': ['10.3.0.2', '10.3.0.1'],
+ 'bng_mux': ['10.1.0.10', '10.1.0.1'],
+ 'mux_gw': ['10.5.0.10', '10.5.0.1']
+ }
+
+ global_subscriber_id = 'SDN-ETHERNET-INTERNET'
+
+ def __init__(self, extra_host_names=None):
+ self.logger = logging.getLogger(__name__)
+ self.logger.info('Initializing configuration')
+
+ self.host_names = ['so', 'sdnc', 'robot', 'aai-inst1', 'dcaedoks00']
+ if extra_host_names:
+ self.host_names.extend(extra_host_names)
+ # get IP addresses
+ self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
+        # keyword used to name vGW stacks; it must not appear in the names of other stacks
+ self.vgw_name_keyword = 'base_vcpe_vgw'
+ self.svc_instance_uuid_file = '__var/svc_instance_uuid'
+ self.preload_dict_file = '__var/preload_dict'
+ self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
+ self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
+ self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
+ self.instance_name_prefix = {
+ 'service': 'vcpe_svc',
+ 'network': 'vcpe_net',
+ 'vnf': 'vcpe_vnf',
+ 'vfmodule': 'vcpe_vfmodule'
+ }
+ self.aai_userpass = 'AAI', 'AAI'
+ self.pub_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh'
+ self.os_tenant_id = self.cloud['--os-tenant-id']
+ self.os_region_name = self.cloud['--os-region-name']
+ self.common_preload_config['pub_key'] = self.pub_key
+ self.sniro_url = 'http://' + self.hosts['robot'] + ':8080/__admin/mappings'
+ self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+
+ #############################################################################################
+ # SDNC urls
+ self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
+ self.sdnc_db_name = 'sdnctl'
+ self.sdnc_db_user = 'sdnctl'
+ self.sdnc_db_pass = 'gamma'
+ self.sdnc_db_port = '32768'
+ self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.sdnc_preload_network_url = 'http://' + self.hosts['sdnc'] + \
+ ':8282/restconf/operations/VNF-API:preload-network-topology-operation'
+ self.sdnc_preload_vnf_url = 'http://' + self.hosts['sdnc'] + \
+ ':8282/restconf/operations/VNF-API:preload-vnf-topology-operation'
+ self.sdnc_ar_cleanup_url = 'http://' + self.hosts['sdnc'] + ':8282/restconf/config/GENERIC-RESOURCE-API:'
+
+ #############################################################################################
+ # SO urls, note: do NOT add a '/' at the end of the url
+ self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':8080/ecomp/mso/infra/serviceInstances/v4',
+ 'v5': 'http://' + self.hosts['so'] + ':8080/ecomp/mso/infra/serviceInstances/v5'}
+ self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':8080/ecomp/mso/infra/orchestrationRequests/v2'
+ self.so_userpass = 'InfraPortalClient', 'password1$'
+ self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.so_db_name = 'mso_catalog'
+ self.so_db_user = 'root'
+ self.so_db_pass = 'password'
+ self.so_db_port = '32768'
+
+ self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
+ self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.vpp_api_userpass = ('admin', 'admin')
+        self.vpp_ves_url = 'http://{0}:8183/restconf/config/vesagent:vesagent'
+
+ def headbridge(self, openstack_stack_name, svc_instance_uuid):
+ """
+ Add vserver information to AAI
+ """
+ self.logger.info('Adding vServer information to AAI for {0}'.format(openstack_stack_name))
+ cmd = '/opt/demo.sh heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid)
+ ret = commands.getstatusoutput("ssh -i onap_dev root@{0} '{1}'".format(self.hosts['robot'], cmd))
+ self.logger.debug('%s', ret)
+
+ def get_brg_mac_from_sdnc(self):
+ """
+ :return: BRG MAC address. Currently we only support one BRG instance.
+ """
+ cnx = mysql.connector.connect(user=self.sdnc_db_user, password=self.sdnc_db_pass, database=self.sdnc_db_name,
+ host=self.hosts['sdnc'], port=self.sdnc_db_port)
+ cursor = cnx.cursor()
+ query = "SELECT * from DHCP_MAP"
+ cursor.execute(query)
+
+ self.logger.debug('DHCP_MAP table in SDNC')
+ counter = 0
+ mac = None
+ for mac, ip in cursor:
+ counter += 1
+ self.logger.debug(mac + ':' + ip)
+
+ cnx.close()
+
+ if counter != 1:
+ self.logger.error('Found %s MAC addresses in DHCP_MAP', counter)
+ sys.exit()
+ else:
+            self.logger.debug('Found MAC address in DHCP_MAP: %s', mac)
+ return mac
+
+ def insert_into_sdnc_db(self, cmds):
+ cnx = mysql.connector.connect(user=self.sdnc_db_user, password=self.sdnc_db_pass, database=self.sdnc_db_name,
+ host=self.hosts['sdnc'], port=self.sdnc_db_port)
+ cursor = cnx.cursor()
+ for cmd in cmds:
+ self.logger.debug(cmd)
+ cursor.execute(cmd)
+ self.logger.debug('%s', cursor)
+ cnx.commit()
+ cursor.close()
+ cnx.close()
+
+ def insert_into_so_db(self, cmds):
+ cnx = mysql.connector.connect(user=self.so_db_user, password=self.so_db_pass, database=self.so_db_name,
+ host=self.hosts['so'], port=self.so_db_port)
+ cursor = cnx.cursor()
+ for cmd in cmds:
+ self.logger.debug(cmd)
+ cursor.execute(cmd)
+ self.logger.debug('%s', cursor)
+ cnx.commit()
+ cursor.close()
+ cnx.close()
+
+ def find_file(self, file_name_keyword, file_ext, search_dir):
+ """
+        :param file_name_keyword: keyword used to look for the csar file, case-insensitive matching, e.g., infra
+        :param file_ext: e.g., csar, json
+        :param search_dir: path to search
+ :return: path name of the file
+ """
+ file_name_keyword = file_name_keyword.lower()
+ file_ext = file_ext.lower()
+ if not file_ext.startswith('.'):
+ file_ext = '.' + file_ext
+
+ filenamepath = None
+ for file_name in os.listdir(search_dir):
+ file_name_lower = file_name.lower()
+ if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
+ if filenamepath:
+ self.logger.error('Multiple files found for *{0}*.{1} in '
+ 'directory {2}'.format(file_name_keyword, file_ext, search_dir))
+ sys.exit()
+ filenamepath = os.path.abspath(os.path.join(search_dir, file_name))
+
+ if filenamepath:
+ return filenamepath
+ else:
+ self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
+ sys.exit()
+
+ @staticmethod
+ def network_name_to_subnet_name(network_name):
+ """
+ :param network_name: example: vcpe_net_cpe_signal_201711281221
+ :return: vcpe_net_cpe_signal_subnet_201711281221
+ """
+ fields = network_name.split('_')
+ fields.insert(-1, 'subnet')
+ return '_'.join(fields)
+
+ def set_network_name(self, network_name):
+ param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
+ openstackcmd = 'openstack ' + param
+ cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
+ os.popen(cmd)
+
+ def set_subnet_name(self, network_name):
+ """
+ Example: network_name = vcpe_net_cpe_signal_201711281221
+ set subnet name to vcpe_net_cpe_signal_subnet_201711281221
+ :return:
+ """
+ param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
+ openstackcmd = 'openstack ' + param
+
+ # expected results: | subnets | subnet_id |
+ subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
+ if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
+ subnet_id = subnet_info[2].strip()
+ subnet_name = self.network_name_to_subnet_name(network_name)
+ cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
+ os.popen(cmd)
+ self.logger.info("Subnet name set to: " + subnet_name)
+ return True
+ else:
+ self.logger.error("Can't get subnet info from network name: " + network_name)
+ return False
+
+ def is_node_in_aai(self, node_type, node_uuid):
+ key = None
+ search_node_type = None
+ if node_type == 'service':
+ search_node_type = 'service-instance'
+ key = 'service-instance-id'
+ elif node_type == 'vnf':
+ search_node_type = 'generic-vnf'
+ key = 'vnf-id'
+ else:
+            self.logger.error('Invalid node_type: ' + node_type)
+ sys.exit()
+
+ url = 'https://{0}:8443/aai/v11/search/nodes-query?search-node-type={1}&filter={2}:EQUALS:{3}'.format(
+ self.hosts['aai-inst1'], search_node_type, key, node_uuid)
+
+ headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot'}
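+        # suppress urllib3 warnings since the AAI HTTPS endpoint is queried with verify=False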
+ requests.packages.urllib3.disable_warnings()
+ r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
+ response = r.json()
+ self.logger.debug('aai query: ' + url)
+ self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
+ return 'result-data' in response
+
+ @staticmethod
+ def extract_ip_from_str(net_addr, net_addr_len, sz):
+ """
+ :param net_addr: e.g. 10.5.12.0
+ :param net_addr_len: e.g. 24
+ :param sz: a string
+ :return: the first IP address matching the network, e.g. 10.5.12.3
+ """
+ network = ipaddress.ip_network(unicode('{0}/{1}'.format(net_addr, net_addr_len)), strict=False)
+ ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz)
+ for ip in ip_list:
+ this_net = ipaddress.ip_network(unicode('{0}/{1}'.format(ip, net_addr_len)), strict=False)
+ if this_net == network:
+ return str(ip)
+ return None
+
+ def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
+ """
+ :param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
+ :param net_addr: e.g. 10.12.5.0
+ :param net_addr_len: e.g. 24
+ :return: dictionary {keyword: ip}
+ """
+ if not net_addr:
+ net_addr = self.external_net_addr
+
+ if not net_addr_len:
+ net_addr_len = self.external_net_prefix_len
+
+ param = ' '.join([k + ' ' + v for k, v in self.cloud.items() if 'identity' not in k])
+ openstackcmd = 'nova ' + param + ' list'
+ self.logger.debug(openstackcmd)
+
+ ip_dict = {}
+ results = os.popen(openstackcmd).read()
+ for line in results.split('\n'):
+ fields = line.split('|')
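+            # a valid 'nova list' table row has 8 '|'-separated fields: field 2 is the VM name, the second-to-last lists its networks/IPs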
+ if len(fields) == 8:
+ vm_name = fields[2]
+ ip_info = fields[-2]
+ for keyword in keywords:
+ if keyword in vm_name:
+ ip = self.extract_ip_from_str(net_addr, net_addr_len, ip_info)
+ if ip:
+ ip_dict[keyword] = ip
+ if len(ip_dict) != len(keywords):
+ self.logger.error('Cannot find all desired IP addresses for %s.', keywords)
+ self.logger.error(json.dumps(ip_dict, indent=4, sort_keys=True))
+ sys.exit()
+ return ip_dict
+
+ def del_vgmux_ves_mode(self):
+ url = self.vpp_ves_url.format(self.hosts['mux']) + '/mode'
+ r = requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
+ self.logger.debug('%s', r)
+
+ def del_vgmux_ves_collector(self):
+ url = self.vpp_ves_url.format(self.hosts['mux']) + '/config'
+ r = requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
+ self.logger.debug('%s', r)
+
+    def set_vgmux_ves_collector(self):
+ url = self.vpp_ves_url.format(self.hosts['mux'])
+ data = {'config':
+ {'server-addr': self.hosts['dcaedoks00'],
+ 'server-port': '8080',
+ 'read-interval': '10',
+ 'is-add':'1'
+ }
+ }
+ r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
+ self.logger.debug('%s', r)
+
+ def set_vgmux_packet_loss_rate(self, lossrate, vg_vnf_instance_name):
+ url = self.vpp_ves_url.format(self.hosts['mux'])
+ data = {"mode":
+ {"working-mode": "demo",
+ "base-packet-loss": str(lossrate),
+ "source-name": vg_vnf_instance_name
+ }
+ }
+ r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
+ self.logger.debug('%s', r)
+
+ # return all the VxLAN interface names of BRG or vGMUX based on the IP address
+ def get_vxlan_interfaces(self, ip, print_info=False):
+ url = self.vpp_inf_url.format(ip)
+ self.logger.debug('url is this: %s', url)
+ r = requests.get(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
+ data = r.json()['interfaces']['interface']
+ if print_info:
+ for inf in data:
+ if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel':
+ print(json.dumps(inf, indent=4, sort_keys=True))
+
+ return [inf['name'] for inf in data if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel']
+
+    # delete all VxLAN interfaces on each host
+ def delete_vxlan_interfaces(self, host_dic):
+ for host, ip in host_dic.items():
+ deleted = False
+ self.logger.info('{0}: Getting VxLAN interfaces'.format(host))
+ inf_list = self.get_vxlan_interfaces(ip)
+ for inf in inf_list:
+ deleted = True
+ time.sleep(2)
+ self.logger.info("{0}: Deleting VxLAN crossconnect {1}".format(host, inf))
+ url = self.vpp_inf_url.format(ip) + '/interface/' + inf + '/v3po:l2'
+ requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
+
+ for inf in inf_list:
+ deleted = True
+ time.sleep(2)
+ self.logger.info("{0}: Deleting VxLAN interface {1}".format(host, inf))
+ url = self.vpp_inf_url.format(ip) + '/interface/' + inf
+ requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
+
+ if len(self.get_vxlan_interfaces(ip)) > 0:
+ self.logger.error("Error deleting VxLAN from {0}, try to restart the VM, IP is {1}.".format(host, ip))
+ return False
+
+ if not deleted:
+ self.logger.info("{0}: no VxLAN interface found, nothing to delete".format(host))
+ return True
+
+ @staticmethod
+ def save_object(obj, filepathname):
+ with open(filepathname, 'wb') as fout:
+ pickle.dump(obj, fout)
+
+ @staticmethod
+ def load_object(filepathname):
+ with open(filepathname, 'rb') as fin:
+ return pickle.load(fin)
+
+ def save_preload_data(self, preload_data):
+ self.save_object(preload_data, self.preload_dict_file)
+
+ def load_preload_data(self):
+ return self.load_object(self.preload_dict_file)
+
+ def save_vgmux_vnf_name(self, vgmux_vnf_name):
+ self.save_object(vgmux_vnf_name, self.vgmux_vnf_name_file)
+
+ def load_vgmux_vnf_name(self):
+ return self.load_object(self.vgmux_vnf_name_file)
+
diff --git a/version-manifest/src/main/resources/docker-manifest.csv b/version-manifest/src/main/resources/docker-manifest.csv
index 4d9a19b..f0dc078 100644
--- a/version-manifest/src/main/resources/docker-manifest.csv
+++ b/version-manifest/src/main/resources/docker-manifest.csv
@@ -29,14 +29,21 @@
onap/music,latest
onap/oof,latest
onap/oom/kube2msb,1.0.0
-onap/org.onap.dcaegen2.collectors.ves.vescollector,v1.1.0
-onap/org.onap.dcaegen2.deployments.bootstrap,v1.1.1
+onap/org.onap.dcaegen2.collectors.snmptrap,latest
+onap/org.onap.dcaegen2.collectors.ves.vescollector,latest
+onap/org.onap.dcaegen2.deployments.bootstrap,1.1.2
+onap/org.onap.dcaegen2.deployments.cm-container,latest
+onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container,latest
+onap/org.onap.dcaegen2.deployments.redis-cluster-container,latest
+onap/org.onap.dcaegen2.deployments.tca-cdap-container,latest
onap/org.onap.dcaegen2.platform.cdapbroker,v4.0.0
-onap/org.onap.dcaegen2.platform.configbinding,v1.2.0
-onap/org.onap.dcaegen2.platform.deployment-handler,v1.1.0
-onap/org.onap.dcaegen2.platform.inventory-api,v1.2.0
-onap/org.onap.dcaegen2.platform.policy-handler,v1.1.0
-onap/org.onap.dcaegen2.platform.servicechange-handler,v1.0.0
+onap/org.onap.dcaegen2.platform.configbinding,latest
+onap/org.onap.dcaegen2.platform.deployment-handler,latest
+onap/org.onap.dcaegen2.platform.inventory-api,latest
+onap/org.onap.dcaegen2.platform.policy-handler,latest
+onap/org.onap.dcaegen2.platform.servicechange-handler,latest
+onap/org.onap.dcaegen2.services.heartbeat,latest
+onap/org.onap.dcaegen2.services.prh.prh-app-server,latest
onap/policy-drools,1.2-STAGING-latest
onap/policy-pe,1.2-STAGING-latest
onap/portal-app,2.1-STAGING-latest
diff --git a/version-manifest/src/main/resources/java-manifest.csv b/version-manifest/src/main/resources/java-manifest.csv
index 4d3eda7..4bed11c 100644
--- a/version-manifest/src/main/resources/java-manifest.csv
+++ b/version-manifest/src/main/resources/java-manifest.csv
@@ -90,20 +90,24 @@
org.onap.cli,cli-validation,1.1.0
org.onap.cli,cli-zip,1.1.0
org.onap.dcaegen2,dcaegen2,1.1.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-aai,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-common,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-plugins,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-tca,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-common,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-dmaap,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-it,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-model,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-tca,2.0.0
-org.onap.dcaegen2.analytics.tca,dcae-analytics-test,2.0.0
-org.onap.dcaegen2.collectors.ves,VESCollector,1.1.4
+org.onap.dcaegen2.analytics.tca,dcae-analytics,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-aai,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-common,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-plugins,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-tca,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-common,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-dmaap,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-it,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-model,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-tca,2.2.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-test,2.2.0
+org.onap.dcaegen2.collectors.ves,VESCollector,1.2.4
org.onap.dcaegen2.platform,inventory-api,1.0.0
org.onap.dcaegen2.platform,servicechange-handler,1.0.0
+org.onap.dcaegen2.services,prh,1.0.0
+org.onap.dcaegen2.services.prh,prh-aai-client,1.0.0
+org.onap.dcaegen2.services.prh,prh-app-server,1.0.0
+org.onap.dcaegen2.services.prh,prh-dmaap-client,1.0.0
org.onap.dmaap.messagerouter.dmaapclient,dmaapClient,1.0.0
org.onap.dmaap.messagerouter.messageservice,dmaapMR1,1.0.1
org.onap.dmaap.messagerouter.mirroragent,dmaapMMAgent,1.0.0