Merge "Attempt to fix CSIT Arm SIMs in logs"
diff --git a/plans/dcaegen2-collectors-hv-ves/testsuites/env_local.sh b/plans/dcaegen2-collectors-hv-ves/testsuites/env_local.sh
index e1bf2a8..280f0c4 100755
--- a/plans/dcaegen2-collectors-hv-ves/testsuites/env_local.sh
+++ b/plans/dcaegen2-collectors-hv-ves/testsuites/env_local.sh
@@ -39,5 +39,3 @@
export DCAE_APP_SIMULATOR_IMAGE="hv-collector-dcae-app-simulator"
export XNF_SIMULATOR_IMAGE="hv-ves-collector-xnf-simulator"
-
-
diff --git a/plans/dcaegen2-collectors-hv-ves/testsuites/setup.sh b/plans/dcaegen2-collectors-hv-ves/testsuites/setup.sh
index 06eaf54..73fdbc2 100755
--- a/plans/dcaegen2-collectors-hv-ves/testsuites/setup.sh
+++ b/plans/dcaegen2-collectors-hv-ves/testsuites/setup.sh
@@ -21,14 +21,15 @@
RUN_CSIT_LOCAL=${RUN_CSIT_LOCAL:-false}
+echo "Replacing obsolete 'docker-py' with 'docker' package"
+pip uninstall -y docker-py
+pip install docker
+
if ${RUN_CSIT_LOCAL} ; then
- echo "Building locally - assuming all dependencies are installed"
+ echo "Local run"
source env_local.sh
else
- echo "Default run - install all dependencies"
- pip uninstall -y docker-py
- pip install docker
-
+ echo "Default (CI) run"
COMPOSE_VERSION=1.23.2
COMPOSE_LOCATION='/usr/local/bin/docker-compose'
sudo curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m) -o ${COMPOSE_LOCATION}
diff --git a/plans/sdnc/sdnc_netconf_tls_post_deploy/certs/certs.properties b/plans/sdnc/sdnc_netconf_tls_post_deploy/certs/certs.properties
new file mode 100644
index 0000000..f8f3fa7
--- /dev/null
+++ b/plans/sdnc/sdnc_netconf_tls_post_deploy/certs/certs.properties
@@ -0,0 +1,2 @@
+keys0.zip
+*****
diff --git a/plans/sdnc/sdnc_netconf_tls_post_deploy/certs/keys0.zip b/plans/sdnc/sdnc_netconf_tls_post_deploy/certs/keys0.zip
new file mode 100644
index 0000000..48b4d90
--- /dev/null
+++ b/plans/sdnc/sdnc_netconf_tls_post_deploy/certs/keys0.zip
Binary files differ
diff --git a/plans/sdnc/sdnc_netconf_tls_post_deploy/setup.sh b/plans/sdnc/sdnc_netconf_tls_post_deploy/setup.sh
new file mode 100644
index 0000000..92470d2
--- /dev/null
+++ b/plans/sdnc/sdnc_netconf_tls_post_deploy/setup.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+# Resolve the directory that contains this setup script
+SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source ${WORKSPACE}/scripts/sdnc/script1.sh
+export DOCKER_SDNC_REPO=darby321
+export DOCKER_SDNC_TAG=testimage1
+export DOCKER_USERNAME=darby321
+export DOCKER_PASSWORD=Darragh1993
+export NEXUS_USERNAME=docker
+export NEXUS_PASSWD=docker
+export NEXUS_DOCKER_REPO=nexus3.onap.org:10001
+export DMAAP_TOPIC=AUTO
+export DOCKER_IMAGE_VERSION=1.5-STAGING-latest
+export CCSDK_DOCKER_IMAGE_VERSION=0.4-STAGING-latest
+
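+# Use the smallest MTU reported by the host's interfaces; fall back to 1450 if none is detected.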
+export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' | sort -n | head -1)
+
+if [ "$MTU" == "" ]; then
+ export MTU="1450"
+fi
+
+# Clone integration repo to get the PNF simulator
+mkdir -p $WORKSPACE/archives/integration
+cd $WORKSPACE/archives
+git clone -b master --single-branch --depth=1 http://gerrit.onap.org/r/integration.git integration
+cd $WORKSPACE/archives/integration
+git pull
+HOST_IP_ADDR=localhost
+# Clone SDNC repo to get docker-compose for SDNC
+mkdir -p $WORKSPACE/archives/sdnc
+cd $WORKSPACE/archives
+git clone -b master --single-branch --depth=1 http://gerrit.onap.org/r/sdnc/oam.git sdnc
+cd $WORKSPACE/archives/sdnc
+git pull
+unset http_proxy https_proxy
+cd $WORKSPACE/archives/sdnc/installation/src/main/yaml
+
+docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD $DOCKER_SDNC_REPO
+docker pull $DOCKER_SDNC_REPO/sdnc-image:$DOCKER_SDNC_TAG
+docker tag $DOCKER_SDNC_REPO/sdnc-image:$DOCKER_SDNC_TAG onap/sdnc-image:latest
+docker logout $DOCKER_SDNC_REPO
+
+sed -i "s/DMAAP_TOPIC_ENV=.*/DMAAP_TOPIC_ENV="AUTO"/g" docker-compose.yml
+docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
+
+#Docker pull $NEXUS_DOCKER_REPO/onap/sdnc-image:$DOCKER_IMAGE_VERSION
+#docker tag $NEXUS_DOCKER_REPO/onap/sdnc-image:$DOCKER_IMAGE_VERSION onap/sdnc-image:latest
+
+docker pull $NEXUS_DOCKER_REPO/onap/sdnc-ansible-server-image:$DOCKER_IMAGE_VERSION
+docker tag $NEXUS_DOCKER_REPO/onap/sdnc-ansible-server-image:$DOCKER_IMAGE_VERSION onap/sdnc-ansible-server-image:latest
+
+docker pull $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:$CCSDK_DOCKER_IMAGE_VERSION
+docker tag $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:$CCSDK_DOCKER_IMAGE_VERSION onap/ccsdk-dgbuilder-image:latest
+
+docker pull $NEXUS_DOCKER_REPO/onap/admportal-sdnc-image:$DOCKER_IMAGE_VERSION
+docker tag $NEXUS_DOCKER_REPO/onap/admportal-sdnc-image:$DOCKER_IMAGE_VERSION onap/admportal-sdnc-image:latest
+
+docker pull $NEXUS_DOCKER_REPO/onap/sdnc-ueb-listener-image:$DOCKER_IMAGE_VERSION
+docker tag $NEXUS_DOCKER_REPO/onap/sdnc-ueb-listener-image:$DOCKER_IMAGE_VERSION onap/sdnc-ueb-listener-image:latest
+
+docker pull $NEXUS_DOCKER_REPO/onap/sdnc-dmaap-listener-image:$DOCKER_IMAGE_VERSION
+
+docker tag $NEXUS_DOCKER_REPO/onap/sdnc-dmaap-listener-image:$DOCKER_IMAGE_VERSION onap/sdnc-dmaap-listener-image:latest
+
+CERT_SUBPATH=plans/sdnc/sdnc_netconf_tls_post_deploy/certs
+export SDNC_CERT_PATH=${WORKSPACE}/${CERT_SUBPATH}
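+# Mount the CSIT certs directory into the SDNC controller container; the literal
+# $SDNC_CERT_PATH written into docker-compose.yml is expected to be substituted by
+# docker-compose from the environment variable exported above.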
+sed -i 's/sdnc_controller_container/sdnc_controller_container\n volumes: \n - $SDNC_CERT_PATH:\/opt\/opendaylight\/current\/certs/' docker-compose.yml
+# start SDNC containers with docker compose and configuration from docker-compose.yml
+docker-compose up -d
+
+cd $WORKSPACE/archives/integration/test/mocks/pnfsimulator
+./simulator.sh start&
+
+# Wait up to TIME_OUT (1000) seconds, checking every INTERVAL (30) seconds whether SDNC is up using the HealthCheck API
+TIME_OUT=1000
+INTERVAL=30
+TIME=0
+while [ "$TIME" -lt "$TIME_OUT" ]; do
+ response=$(curl --write-out '%{http_code}' --silent --output /dev/null -H "Authorization: Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ==" -X POST -H "X-FromAppId: csit-sdnc" -H "X-TransactionId: csit-sdnc" -H "Accept: application/json" -H "Content-Type: application/json" http://localhost:8282/restconf/operations/SLI-API:healthcheck ); echo $response
+
+ if [ "$response" == "200" ]; then
+ echo SDNC started in $TIME seconds
+ break;
+ fi
+
+  echo "Sleeping $INTERVAL seconds before retesting whether SDNC is up. Total wait so far: $TIME seconds; timeout: $TIME_OUT seconds"
+ sleep $INTERVAL
+ TIME=$(($TIME+$INTERVAL))
+done
+
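+# Use the docker0 bridge address as the PNF simulator address in the netconf mount request payload.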
+export PNF_IP=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+')
+sed -i "s/pnfaddr/$PNF_IP/g" $WORKSPACE/tests/sdnc/sdnc_netconf_tls_post_deploy/data/mount.xml
+
+if [ "$TIME" -ge "$TIME_OUT" ]; then
+  echo "TIME OUT: Docker containers not started within $TIME_OUT seconds; this may cause problems for the test activities"
+fi
+
+#sleep 800
+
+TIME_OUT=1500
+INTERVAL=60
+TIME=0
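+# Poll until karaf inside the SDNC controller reports start level 100
+# (etc/host.key is cleared before and after each probe with the karaf client).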
+while [ "$TIME" -lt "$TIME_OUT" ]; do
+  docker exec sdnc_controller_container rm -f /opt/opendaylight/current/etc/host.key
+  response=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client system:start-level)
+  docker exec sdnc_controller_container rm -f /opt/opendaylight/current/etc/host.key
+
+ if [ "$response" == "Level 100" ] ; then
+ echo SDNC karaf started in $TIME seconds
+ break;
+ fi
+
+  echo "Sleeping $INTERVAL seconds before retesting whether SDNC karaf is up. Total wait so far: $TIME seconds; timeout: $TIME_OUT seconds"
+ sleep $INTERVAL
+ TIME=$(($TIME+$INTERVAL))
+done
+
+if [ "$TIME" -ge "$TIME_OUT" ]; then
+  echo "TIME OUT: karaf session not started within $TIME_OUT seconds; this may cause problems for the test activities"
+fi
+
+response=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client system:start-level)
+
+ if [ "$response" == "Level 100" ] ; then
+ num_failed_bundles=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client bundle:list | grep Failure | wc -l)
+ failed_bundles=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client bundle:list | grep Failure)
+ echo There is/are $num_failed_bundles failed bundles out of $num_bundles installed bundles.
+ fi
+
+if [ "$num_failed_bundles" -ge 1 ]; then
+ echo "The following bundle(s) are in a failed state: "
+ echo " $failed_bundles"
+fi
+
+# Sleep an additional 200 seconds to give the application time to finish starting up
+sleep 200
+
+# Pass any variables required by Robot test suites in ROBOT_VARIABLES
+ROBOT_VARIABLES="-v SCRIPTS:${SCRIPTS}"
\ No newline at end of file
diff --git a/plans/sdnc/sdnc_netconf_tls_post_deploy/teardown.sh b/plans/sdnc/sdnc_netconf_tls_post_deploy/teardown.sh
new file mode 100644
index 0000000..97696ac
--- /dev/null
+++ b/plans/sdnc/sdnc_netconf_tls_post_deploy/teardown.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+
+kill-instance.sh sdnc_controller_container
+kill-instance.sh sdnc_dgbuilder_container
+kill-instance.sh sdnc_portal_container
+kill-instance.sh sdnc_db_container
+kill-instance.sh sdnc_ueblistener_container
+kill-instance.sh sdnc_dmaaplistener_container
+kill-instance.sh sdnc_ansible_container
+kill-instance.sh netopeer
+kill-instance.sh sftp-server
+kill-instance.sh ftpes-server-vsftpd
+kill-instance.sh ftpes-server-pure-ftpd
+
+# $WORKSPACE/archives/appc is deleted together with the archives folder when the tests start, so we keep it until the end for debugging
diff --git a/plans/sdnc/sdnc_netconf_tls_post_deploy/testplan.txt b/plans/sdnc/sdnc_netconf_tls_post_deploy/testplan.txt
new file mode 100644
index 0000000..a0bffe5
--- /dev/null
+++ b/plans/sdnc/sdnc_netconf_tls_post_deploy/testplan.txt
@@ -0,0 +1,4 @@
+# Test suites are relative paths under [integration/csit.git]/tests/.
+# Place the suites in run order.
+sdnc/sdnc_netconf_tls_post_deploy
+
diff --git a/plans/usecases/5G-bulkpm/composefile/docker-compose-e2e.yml b/plans/usecases/5G-bulkpm/composefile/docker-compose-e2e.yml
index f3c47bb..4f6c4da 100644
--- a/plans/usecases/5G-bulkpm/composefile/docker-compose-e2e.yml
+++ b/plans/usecases/5G-bulkpm/composefile/docker-compose-e2e.yml
@@ -20,6 +20,8 @@
interval: 10s
timeout: 10s
retries: 5
+ extra_hosts:
+ - "dmaap-dr-node:1.1.1.1"
datarouter-node:
image: nexus3.onap.org:10001/onap/dmaap/datarouter-node:2.0.2-SNAPSHOT-latest
@@ -33,6 +35,8 @@
depends_on:
datarouter-prov:
condition: service_healthy
+ extra_hosts:
+ - "dmaap-dr-prov:2.2.2.2"
datarouter-subscriber:
image: nexus3.onap.org:10001/onap/dmaap/datarouter-subscriber:2.0.2-SNAPSHOT-latest
@@ -90,18 +94,4 @@
container_name: cbs
image: nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.configbinding.app-app:latest
environment:
- CONSUL_HOST:
-
- buscontroller:
- container_name: buscontroller
- image: nexus3.onap.org:10001/onap/dmaap/dmaap-bc:latest
- ports:
- - "18080:8080"
- - "18443:8443"
- extra_hosts:
- - "dmaap-dr-prov:DMAAPDR"
- - "message-router:DMAAPMR"
- volumes:
- - /tmp/docker-databus-controller.conf:/opt/app/config/conf
- depends_on:
- - datarouter-prov
+ CONSUL_HOST:
\ No newline at end of file
diff --git a/plans/usecases/5G-bulkpm/setup.sh b/plans/usecases/5G-bulkpm/setup.sh
index 3d3a9ef..5d99b1c 100644
--- a/plans/usecases/5G-bulkpm/setup.sh
+++ b/plans/usecases/5G-bulkpm/setup.sh
@@ -76,10 +76,9 @@
docker login -u docker -p docker nexus3.onap.org:10001
docker-compose up -d
echo "Disregard the message ERROR: for datarouter-node Container 1234456 is unhealthy, this is expected behaiour at this stage"
-docker kill datarouter-prov
-docker kill datarouter-node
docker kill vescollector
docker kill cbs
+sleep 10
CONSUL_IP=$(docker inspect '--format={{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' consul )
sed -i -e '/CONSUL_HOST:/ s/:.*/: '$CONSUL_IP'/' docker-compose.yml
HOST_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}')
@@ -87,6 +86,23 @@
MARIADB=$(docker inspect '--format={{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mariadb )
sed -i 's/datarouter-mariadb/'$MARIADB'/g' $WORKSPACE/archives/dmaapdr/datarouter/datarouter-docker-compose/src/main/resources/prov_data/provserver.properties
docker-compose up -d
+sleep 5
+# Get the IP addresses of datarouter-prov, datarouter-node and fileconsumer-node.
+DR_PROV_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' datarouter-prov)
+DR_NODE_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' datarouter-node)
+DR_SUBSCIBER_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' fileconsumer-node)
+DR_GATEWAY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}' datarouter-prov)
+
+echo DR_PROV_IP=${DR_PROV_IP}
+echo DR_NODE_IP=${DR_NODE_IP}
+echo DR_GATEWAY_IP=${DR_GATEWAY_IP}
+echo DR_SUBSCIBER_IP=${DR_SUBSCIBER_IP}
+
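+# Replace the extra_hosts placeholders (1.1.1.1 / 2.2.2.2) in docker-compose.yml with the real
+# datarouter-node / datarouter-prov addresses, then recreate the DR containers.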
+docker kill datarouter-node
+docker kill datarouter-prov
+sed -i 's/1.1.1.1/'$DR_NODE_IP'/g' docker-compose.yml
+sed -i 's/2.2.2.2/'$DR_PROV_IP'/g' docker-compose.yml
+docker-compose up -d
# Wait for initialization of Docker container for datarouter-node, datarouter-prov and mariadb
for i in {1..10}; do
@@ -103,23 +119,12 @@
done
sleep 5
-# Get IP address of datarrouger-prov, datarouter-node, fileconsumer-node.
-DR_PROV_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' datarouter-prov)
-DR_NODE_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' datarouter-node)
-DR_SUBSCIBER_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' fileconsumer-node)
-DR_GATEWAY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}' datarouter-prov)
-
-echo DR_PROV_IP=${DR_PROV_IP}
-echo DR_NODE_IP=${DR_NODE_IP}
-echo DR_GATEWAY_IP=${DR_GATEWAY_IP}
-echo DR_SUBSCIBER_IP=${DR_SUBSCIBER_IP}
docker exec -i datarouter-prov sh -c "curl -k -X PUT https://$DR_PROV_IP:8443/internal/api/NODES?val=dmaap-dr-node\|$DR_GATEWAY_IP"
docker exec -i datarouter-prov sh -c "curl -k -X PUT https://$DR_PROV_IP:8443/internal/api/PROV_AUTH_ADDRESSES?val=dmaap-dr-prov\|$DR_GATEWAY_IP"
-docker exec datarouter-prov /bin/sh -c "echo '${DR_NODE_IP}' dmaap-dr-node >> /etc/hosts"
-docker exec datarouter-node /bin/sh -c "echo '${DR_PROV_IP}' dmaap-dr-prov >> /etc/hosts"
docker exec datarouter-node /bin/sh -c "echo '${DR_SUBSCIBER_IP}' dmaap-dr-subscriber >> /etc/hosts"
+
# Get IP address of DMAAP, KAFKA, Zookeeper
DMAAP_MR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $DMAAP)
KAFKA_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $KAFKA)
@@ -131,11 +136,6 @@
export HOST_IP=${HOST_IP}
export DMAAP_MR_IP=${DMAAP_MR_IP}
-docker kill buscontroller
-sed -i 's/DMAAPDR/'$DR_PROV_IP'/g' docker-compose.yml
-sed -i 's/DMAAPMR/'$DMAAP_MR_IP'/g' docker-compose.yml
-docker-compose up -d
-sed -i 's/DMAAPDR/'$DR_PROV_IP'/g' /tmp/docker-databus-controller.conf
# Data File Collector configuration :
sed -i 's/DR_NODE_IP/'$DR_NODE_IP'/g' docker-compose.yml
@@ -156,13 +156,12 @@
docker restart dfc
sleep 2
-# Wait for initialization of Docker container for datarouter-node, datarouter-prov and mariadb, Consul, CBS, Buscontroller
+# Wait for initialization of the Docker containers for datarouter-node, datarouter-prov, mariadb, Consul and CBS
for i in {1..10}; do
if [ $(docker inspect --format '{{ .State.Running }}' consul) ] && \
- [ $(docker inspect --format '{{ .State.Running }}' cbs) ] && \
- [ $(docker inspect --format '{{ .State.Running }}' buscontroller) ]
+ [ $(docker inspect --format '{{ .State.Running }}' cbs) ]
then
- echo "Data Router, Consul, Config Binding Service, Buscontroller Services Running"
+ echo "Data Router, Consul, Config Binding Service Services Running"
break
else
echo sleep $i
diff --git a/plans/usecases/5G-bulkpm/teardown.sh b/plans/usecases/5G-bulkpm/teardown.sh
index 7a99941..78de759 100644
--- a/plans/usecases/5G-bulkpm/teardown.sh
+++ b/plans/usecases/5G-bulkpm/teardown.sh
@@ -15,5 +15,4 @@
kill-instance.sh dfc
kill-instance.sh sftp
kill-instance.sh cbs
-kill-instance.sh consul
-kill-instance.sh buscontroller
\ No newline at end of file
+kill-instance.sh consul
\ No newline at end of file
diff --git a/tests/dcaegen2-collectors-hv-ves/testcases/libraries/KafkaLibrary.py b/tests/dcaegen2-collectors-hv-ves/testcases/libraries/KafkaLibrary.py
new file mode 100644
index 0000000..e262ff0
--- /dev/null
+++ b/tests/dcaegen2-collectors-hv-ves/testcases/libraries/KafkaLibrary.py
@@ -0,0 +1,61 @@
+# ============LICENSE_START====================================
+# csit-dcaegen2-collectors-hv-ves
+# =========================================================
+# Copyright (C) 2019 Nokia. All rights reserved.
+# =========================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=====================================
+
+import docker
+from robot.api import logger
+
+KAFKA_IMAGE_FULL_NAME = "wurstmeister/kafka"
+KAFKA_ADDRESS = "kafka:9092"
+ZOOKEEPER_ADDRESS = "zookeeper:2181"
+
+LIST_TOPICS_COMMAND = "kafka-topics.sh --list --zookeeper %s" % ZOOKEEPER_ADDRESS
+TOPIC_STATUS_COMMAND = "kafka-run-class.sh kafka.tools.GetOffsetShell --broker-list " + KAFKA_ADDRESS + " --topic %s --time -1"
+DELETE_TOPIC_COMMAND = "kafka-topics.sh --zookeeper " + ZOOKEEPER_ADDRESS + " --delete --topic %s"
+
+
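+# Robot Framework keyword library used by the HV-VES suite teardown: it looks up the Kafka
+# container via the docker SDK (by image ancestor) and logs/clears the topics between runs.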
+class KafkaLibrary:
+
+    def log_kafka_status(self):
+        dockerClient = docker.from_env()
+        kafka = dockerClient.containers.list(filters={"ancestor": KAFKA_IMAGE_FULL_NAME}, all=True)[0]
+
+        topics = self.get_topics(kafka)
+        logger.info("Topics initialized in Kafka cluster: " + str(topics))
+        for topic in topics:
+            if topic == "__consumer_offsets":
+                # kafka-internal topic, ignore it
+                continue
+
+            self.log_topic_status(kafka, topic)
+            self.reset_topic(kafka, topic)
+
+        dockerClient.close()
+
+    def get_topics(self, kafka):
+        exitCode, output = kafka.exec_run(LIST_TOPICS_COMMAND)
+        return output.splitlines()
+
+    def log_topic_status(self, kafka, topic):
+        _, topic_status = kafka.exec_run(TOPIC_STATUS_COMMAND % topic)
+        logger.info("Messages on topic: " + str(topic_status))
+
+    def reset_topic(self, kafka, topic):
+        logger.info("Removing topic " + str(topic) +
+                    " (note that it will be recreated by dcae-app-simulator/hv-ves-collector, however the offset will be reset)")
+        _, output = kafka.exec_run(DELETE_TOPIC_COMMAND % topic)
+        logger.info(str(output))
diff --git a/tests/dcaegen2-collectors-hv-ves/testcases/resources/common-keywords.robot b/tests/dcaegen2-collectors-hv-ves/testcases/resources/common-keywords.robot
index 1c92540..07e886a 100644
--- a/tests/dcaegen2-collectors-hv-ves/testcases/resources/common-keywords.robot
+++ b/tests/dcaegen2-collectors-hv-ves/testcases/resources/common-keywords.robot
@@ -19,6 +19,7 @@
*** Settings ***
Library XnfSimulatorLibrary
Library VesHvContainersUtilsLibrary
+Library KafkaLibrary
Library Collections
*** Keywords ***
@@ -63,6 +64,7 @@
VES-HV Collector Suite Teardown
+ Log Kafka Status
Stop And Remove All Xnf Simulators ${SUITE NAME}
*** Variables ***
diff --git a/tests/sdnc/sdnc_netconf_tls_post_deploy/_init_.robot b/tests/sdnc/sdnc_netconf_tls_post_deploy/_init_.robot
new file mode 100644
index 0000000..d735306
--- /dev/null
+++ b/tests/sdnc/sdnc_netconf_tls_post_deploy/_init_.robot
@@ -0,0 +1,2 @@
+*** Settings ***
+Documentation     SDNC - keystorecheck
diff --git a/tests/sdnc/sdnc_netconf_tls_post_deploy/data/mount.xml b/tests/sdnc/sdnc_netconf_tls_post_deploy/data/mount.xml
new file mode 100644
index 0000000..108369b
--- /dev/null
+++ b/tests/sdnc/sdnc_netconf_tls_post_deploy/data/mount.xml
@@ -0,0 +1,14 @@
+<node xmlns="urn:TBD:params:xml:ns:yang:network-topology">
+ <node-id>netopeer2</node-id>
+ <key-based xmlns="urn:opendaylight:netconf-node-topology">
+ <key-id xmlns="urn:opendaylight:netconf-node-topology">ODL_private_key_0</key-id>
+ <username xmlns="urn:opendaylight:netconf-node-topology">netconf</username>
+ </key-based>
+ <host xmlns="urn:opendaylight:netconf-node-topology">pnfaddr</host>
+ <port xmlns="urn:opendaylight:netconf-node-topology">6513</port>
+ <tcp-only xmlns="urn:opendaylight:netconf-node-topology">false</tcp-only>
+ <protocol xmlns="urn:opendaylight:netconf-node-topology">
+ <name xmlns="urn:opendaylight:netconf-node-topology">TLS</name>
+ </protocol>
+ <max-connection-attempts xmlns="urn:opendaylight:netconf-node-topology">2</max-connection-attempts>
+</node>
diff --git a/tests/sdnc/sdnc_netconf_tls_post_deploy/sdnc_post_deploy_cert_check.robot b/tests/sdnc/sdnc_netconf_tls_post_deploy/sdnc_post_deploy_cert_check.robot
new file mode 100644
index 0000000..f181414
--- /dev/null
+++ b/tests/sdnc/sdnc_netconf_tls_post_deploy/sdnc_post_deploy_cert_check.robot
@@ -0,0 +1,39 @@
+*** Settings ***
+Library           Collections
+Library           RequestsLibrary
+Library           OperatingSystem
+Library           json
+Library           String
+
+*** Variables ***
+${SDNC_KEYSTORE_CONFIG_PATH}    /config/netconf-keystore:keystore
+${SDNC_MOUNT_PATH}              /config/network-topology:network-topology/topology/topology-netconf/node/netopeer2
+${PNFSIM_MOUNT_PATH}            /config/network-topology:network-topology/topology/topology-netconf/node/netopeer2/yang-ext:mount/mynetconf:netconflist
+
+*** Test Cases ***
+Test SDNC Keystore
+    [Documentation]    Checking keystore after SDNC installation
+    Create Session    sdnc    http://localhost:8282/restconf
+    &{headers}=    Create Dictionary    Authorization=Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ==    Content-Type=application/json    Accept=application/json
+    ${resp}=    Get Request    sdnc    ${SDNC_KEYSTORE_CONFIG_PATH}    headers=${headers}
+    Should Be Equal As Strings    ${resp.status_code}    200
+    ${keystoreContent}=    Convert To String    ${resp.content}
+    Log to console    *************************
+    Log to console    ${resp.content}
+    Log to console    *************************
+
+Test SDNC PNF Mount
+    [Documentation]    Checking PNF mount after SDNC installation
+    Create Session    sdnc    http://localhost:8282/restconf
+    ${mount}=    Get File    ${CURDIR}${/}data${/}mount.xml
+    Log to console    ${mount}
+    &{headers}=    Create Dictionary    Authorization=Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ==    Content-Type=application/xml    Accept=application/xml
+    ${resp}=    Put Request    sdnc    ${SDNC_MOUNT_PATH}    data=${mount}    headers=${headers}
+    Should Be Equal As Strings    ${resp.status_code}    201
+    Sleep    30
+    &{headers1}=    Create Dictionary    Authorization=Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ==    Content-Type=application/json    Accept=application/json
+    ${resp1}=    Get Request    sdnc    ${PNFSIM_MOUNT_PATH}    headers=${headers1}
+    Should Be Equal As Strings    ${resp1.status_code}    200
+    Log to console    ${resp1.content}
+    Should Contain    ${resp1.content}    netconf-id
+    Should Contain    ${resp1.content}    netconf-param
\ No newline at end of file
diff --git a/tests/usecases/5G-bulkpm/BulkpmE2E.robot b/tests/usecases/5G-bulkpm/BulkpmE2E.robot
index f8ba0fb..dcf9721 100644
--- a/tests/usecases/5G-bulkpm/BulkpmE2E.robot
+++ b/tests/usecases/5G-bulkpm/BulkpmE2E.robot
@@ -20,9 +20,9 @@
${CLI_EXEC_CLI_FILECONSUMER} docker exec fileconsumer-node /bin/sh -c "ls /opt/app/subscriber/delivery | grep .xml"
${CLI_EXEC_CLI_DFC_LOG} docker exec dfc /bin/sh -c "cat /var/log/ONAP/application.log" > /tmp/dfc_docker.log.robot
${CLI_EXEC_CLI_DFC_LOG_GREP} grep "Publish to DR successful!" /tmp/dfc_docker.log.robot
+${CLI_EXEC_CLI_FILECONSUMER_CP} docker cp fileconsumer-node:/opt/app/subscriber/delivery/xNF.pm.xml.M %{WORKSPACE}
+${CLI_EXEC_RENAME_METADATA} mv %{WORKSPACE}/xNF.pm.xml.M %{WORKSPACE}/metadata.json
-${CLI_EXEC_CLI_FILECONSUMER_CP} docker cp fileconsumer-node:/opt/app/subscriber/delivery/oteNB5309_xNF.pm.xml.M %{WORKSPACE}
-${CLI_EXEC_RENAME_METADATA} mv %{WORKSPACE}/oteNB5309_xNF.pm.xml.M %{WORKSPACE}/metadata.json
${metadataSchemaPath} %{WORKSPACE}/tests/usecases/5G-bulkpm/assets/metadata.schema.json
${metadataJsonPath} %{WORKSPACE}/metadata.json
@@ -86,7 +86,7 @@
${cli_cmd_output}= Run Process ${CLI_EXEC_CLI_FILECONSUMER} shell=yes
Log ${cli_cmd_output.stdout}
Should Be Equal As Strings ${cli_cmd_output.rc} 0
- Should Contain ${cli_cmd_output.stdout} oteNB5309_xNF.pm.xml
+ Should Contain ${cli_cmd_output.stdout} xNF.pm.xml
Verify File Consumer Receive valid metadata from Data Router
[Tags] Bulk_PM_E2E_06
@@ -94,7 +94,7 @@
${cli_cmd_output}= Run Process ${CLI_EXEC_CLI_FILECONSUMER} shell=yes
Log ${cli_cmd_output.stdout}
Should Be Equal As Strings ${cli_cmd_output.rc} 0
- Should Contain ${cli_cmd_output.stdout} oteNB5309_xNF.pm.xml.M
+ Should Contain ${cli_cmd_output.stdout} xNF.pm.xml.M
${cli_cmd_output}= Run Process ${CLI_EXEC_CLI_FILECONSUMER_CP} shell=yes
${cli_cmd_output}= Run Process ${CLI_EXEC_RENAME_METADATA} shell=yes
${validation_result}= Validate ${metadataSchemaPath} ${metadataJsonPath}