Add onaptests_bench to the integration project repository
Migrate the ONAP GitLab project into the ONAP integration repository
Issue-ID: INT-2150
Signed-off-by: Michal Jagiello <michal.jagiello@t-mobile.pl>
Change-Id: I5748c47931c8cf37a2fc88d89e117445d6987010
diff --git a/test/onaptests_bench/MANIFEST.in b/test/onaptests_bench/MANIFEST.in
new file mode 100644
index 0000000..2ca3ee6
--- /dev/null
+++ b/test/onaptests_bench/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include src/onaptests_bench/templates *
+recursive-include src/onaptests_bench/artifacts *
diff --git a/test/onaptests_bench/requirements.txt b/test/onaptests_bench/requirements.txt
new file mode 100644
index 0000000..61c203a
--- /dev/null
+++ b/test/onaptests_bench/requirements.txt
@@ -0,0 +1,6 @@
+pyopenssl
+kubernetes
+matplotlib
+jinja2
+docker
+xtesting
diff --git a/test/onaptests_bench/setup.cfg b/test/onaptests_bench/setup.cfg
new file mode 100644
index 0000000..b4a62e2
--- /dev/null
+++ b/test/onaptests_bench/setup.cfg
@@ -0,0 +1,22 @@
+[metadata]
+name = onaptests_bench
+version = 0.1
+description = Addon to run several pythonsdk_tests basic_* tests simultaneously
+author = Orange OpenSource
+license = Apache 2.0
+classifiers =
+ Programming Language :: Python :: 3
+
+[options]
+zip_safe = False
+include_package_data = True
+package_dir=
+ =src
+packages=find_namespace:
+
+[options.packages.find]
+where=src
+
+[entry_points]
+console_scripts =
+ run_stability_tests = onaptests_bench.launcher:main
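+
+# Example (illustrative values): once installed, e.g. with "pip install .",
+# the console script above becomes available as:
+#   run_stability_tests -t basic_vm -s 5 -d 180 -r /tmp/results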
diff --git a/test/onaptests_bench/setup.py b/test/onaptests_bench/setup.py
new file mode 100644
index 0000000..0dea624
--- /dev/null
+++ b/test/onaptests_bench/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+
+# ============LICENSE_START=======================================================
+# Copyright (C) 2022 Orange, Ltd.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+import setuptools
+setuptools.setup(
+ setup_requires=['pbr', 'setuptools'],
+ pbr=True,
+ include_package_data=True)
diff --git a/test/onaptests_bench/src/onaptests_bench/__init__.py b/test/onaptests_bench/src/onaptests_bench/__init__.py
new file mode 100644
index 0000000..a692106
--- /dev/null
+++ b/test/onaptests_bench/src/onaptests_bench/__init__.py
@@ -0,0 +1,17 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2022 Orange, Ltd.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
\ No newline at end of file
diff --git a/test/onaptests_bench/src/onaptests_bench/artifacts/settings.py b/test/onaptests_bench/src/onaptests_bench/artifacts/settings.py
new file mode 100644
index 0000000..b9f5c7f
--- /dev/null
+++ b/test/onaptests_bench/src/onaptests_bench/artifacts/settings.py
@@ -0,0 +1,65 @@
+"""Specific settings module."""
+
+# ============LICENSE_START=======================================================
+# Copyright (C) 2022 Orange, Ltd.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+######################
+#                    #
+#  ONAP INPUT DATA   #
+#                    #
+######################
+
+# Variables to configure the logger
+# Possible values for logging levels in onapsdk: INFO, DEBUG, WARNING, ERROR
+LOG_CONFIG = {
+ "version": 1,
+ "disable_existing_loggers": False,
+ "formatters": {
+ "default": {
+ "class": "logging.Formatter",
+ "format": "%(asctime)s %(levelname)s %(lineno)d:%(filename)s(%(process)d) - %(message)s"
+ }
+ },
+ "handlers": {
+ "console": {
+ "level": "WARN",
+ "class": "logging.StreamHandler",
+ "formatter": "default"
+ },
+ "file": {
+ "level": "DEBUG",
+ "class": "logging.FileHandler",
+ "formatter": "default",
+ "filename": "/var/lib/xtesting/results/pythonsdk.debug.log",
+ "mode": "w"
+ }
+ },
+ "root": {
+ "level": "INFO",
+ "handlers": ["console", "file"]
+ }
+}
+CLEANUP_FLAG = False
+
+# SOCK_HTTP = "socks5h://127.0.0.1:8080"
+REPORTING_FILE_PATH = "/var/lib/xtesting/results/reporting.html"
+K8S_REGION_TYPE = "k8s"
+TILLER_HOST = "localhost"
+K8S_CONFIG = None # None means it will use default config (~/.kube/config)
+K8S_NAMESPACE = "onap" # Kubernetes namespace
+ORCHESTRATION_REQUEST_TIMEOUT = 60.0 * 30 # 30 minutes in seconds
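+
+# Note: launcher.py mounts this file over the default onaptests settings
+# inside each xtesting container (see ONAPTESTS_SETTINGS in launcher.py).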
diff --git a/test/onaptests_bench/src/onaptests_bench/launcher.py b/test/onaptests_bench/src/onaptests_bench/launcher.py
new file mode 100644
index 0000000..d8c311c
--- /dev/null
+++ b/test/onaptests_bench/src/onaptests_bench/launcher.py
@@ -0,0 +1,285 @@
+#!/usr/bin/env python3
+
+# ============LICENSE_START=======================================================
+# Copyright (C) 2022 Orange, Ltd.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+#
+# Launch basic_* tests in parallel and report results
+# the possible basic tests are:
+# - basic_onboard
+# - basic_vm
+# - basic_network
+# - basic_cnf
+# - ...
+
+# Dependencies:
+# See requirements.txt
+# The dashboard is based on bulma framework
+#
+# Environment:
+#
+# Example usage:
+# python launcher.py
+# -t <test>
+# -s <nb simultaneous occurences>
+# -d <duration>
+# -r <reporting path>
+#
+# the summary html page will be generated where the script is launched
+"""
+Check ONAP certificates
+"""
+import argparse
+import logging
+import os
+import sys
+import random
+import string
+import time
+import docker # pylint: disable=import-error
+
+import onaptests_bench.reporting as Reporting
+
+HOMEPATH = os.environ.get("HOME", "/home/ubuntu")
+
+sys.path.append(f"{HOMEPATH}/onaptests_bench/src/onaptests_bench")
+
+# Logger
+LOG_LEVEL = 'INFO'
+logging.basicConfig()
+LOGGER = logging.getLogger("onaptests_bench")
+LOGGER.setLevel(LOG_LEVEL)
+TEST_LIST = ['basic_onboard', 'basic_vm', 'basic_vm_macro',
+ 'basic_network', 'basic_cnf']
+DEFAULT_TEST = TEST_LIST[0]
+DEFAULT_SIMU_TESTS = 5
+DEFAULT_TEST_DURATION = 180 # duration in minutes
+RESULT_PATH = "/tmp"
+ONAPTEST_BENCH_WAIT_TIMER = 40
+ONAPTESTS_SETTINGS = ("/usr/lib/python3.8/site-packages/onaptests" +
+ "/configuration/settings.py")
+
+CLUSTER_IP = "127.0.0.1"
+
+# Get arguments
+PARSER = argparse.ArgumentParser()
+PARSER.add_argument(
+ '-t',
+ '--test',
+ choices=TEST_LIST,
+    help=('Select your test (basic_onboard, basic_vm, basic_vm_macro, '
+          'basic_network, basic_cnf). If not set, basic_onboard is used.'),
+ default=DEFAULT_TEST)
+PARSER.add_argument(
+ '-s',
+ '--simu',
+ type=int,
+ help='Number of simultaneous tests',
+ default=DEFAULT_SIMU_TESTS)
+PARSER.add_argument(
+ '-d',
+ '--duration',
+ type=int,
+ help='Test duration (in minutes)',
+ default=DEFAULT_TEST_DURATION)
+PARSER.add_argument(
+ '-r',
+ '--reporting',
+ help='Result directory',
+ default=RESULT_PATH)
+PARSER.add_argument(
+ '-i',
+ '--ip',
+ help='Cluster IP',
+ default=CLUSTER_IP)
+
+ARGS = PARSER.parse_args()
+
+def prepare_test_config():
+    """Check the test setup.
+    We assume that the basic_* test configuration is already available
+    in /tmp/xtesting; if not, the tests cannot be executed."""
+ LOGGER.info("Prepare the test, verify that the test can be run")
+
+def get_container_name():
+    """Generate a container name with a random suffix."""
+ result_str = ''.join(random.choice(string.ascii_letters) for i in range(8))
+ container_name = ARGS.test + "_" + result_str
+ return container_name
+
+def clean_test_device(docker_client, test):
+ """Clean test resources."""
+ container_list = docker_client.containers.list(
+ all=True,
+ filters={'label':'test='+test})
+ LOGGER.info("Containers cleanup before: %s containers", len(container_list))
+
+ for container in container_list:
+ container.stop()
+ container.remove()
+
+def retrieve_onap_ip():
+    """Retrieve the ONAP IP from /etc/hosts."""
+    filepath = '/etc/hosts'
+    with open(filepath) as fp_config:
+        for line in fp_config:
+            if "so.api.simpledemo.onap.org" in line:
+                onap_ip = line.split()[0]
+                return onap_ip
+    return None
+
+def execute_test(serie_number, test_number,
+ docker_client):
+ """Execute one test."""
+ LOGGER.info("Execute test n° %s", test_number + 1)
+
+ volume_reporting = (ARGS.reporting + '/serie' + str(serie_number) +
+ '/test' + str(test_number + 1))
+ if ARGS.ip == CLUSTER_IP:
+ onap_ip = retrieve_onap_ip()
+ else:
+ onap_ip = ARGS.ip
+
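+    # run the xtesting container in detached mode: ONAP hostnames are mapped
+    # to the cluster IP, and the env file, clouds.yaml, kube config, results
+    # directory and the custom settings.py are mounted into the container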
+ this_container = docker_client.containers.run(
+ "nexus3.onap.org:10003/onap/xtesting-smoke-usecases-pythonsdk:master",
+ command="run_tests -t " + ARGS.test,
+ name=get_container_name(),
+ labels={"test":ARGS.test},
+ stdout=True,
+ stderr=True,
+ stream=False,
+ detach=True,
+ extra_hosts={'portal.api.simpledemo.onap.org':onap_ip,
+ 'vid.api.simpledemo.onap.org':onap_ip,
+ 'sdc.api.fe.simpledemo.onap.org':onap_ip,
+ 'sdc.api.be.simpledemo.onap.org':onap_ip,
+ 'aai.api.sparky.simpledemo.onap.org':onap_ip,
+ 'so.api.simpledemo.onap.org':onap_ip,
+ 'sdnc.api.simpledemo.onap.org':onap_ip,
+ 'sdc.workflow.plugin.simpledemo.onap.org':onap_ip,
+ 'sdc.dcae.plugin.simpledemo.onap.org':onap_ip,
+ 'msb.api.simpledemo.onap.org':onap_ip},
+ volumes={'/tmp/xtesting/smoke-usecases/' + ARGS.test + '/env':{'bind': '/var/lib/xtesting/conf/env_file', 'mode': 'rw'}, # pylint: disable=line-too-long
+ f'{HOMEPATH}/.config/openstack/clouds.yaml':{'bind': '/root/.config/openstack/clouds.yaml', 'mode': 'rw'}, # pylint: disable=line-too-long
+ volume_reporting:{'bind':'/var/lib/xtesting/results', 'mode': 'rw'},
+ f'{HOMEPATH}/.kube/config':{'bind':'/root/.kube/config', 'mode': 'rw'},
+ os.path.dirname(os.path.abspath(__file__)) + '/artifacts/settings.py':{'bind': ONAPTESTS_SETTINGS, 'mode': 'rw'}}) # pylint: disable=line-too-long
+
+ return this_container
+
+def launch_test_serie(serie_number,
+ docker_client, serie_containers):
+    """Launch a series of n tests."""
+ for test_number in range(ARGS.simu):
+ container = execute_test(serie_number, test_number,
+ docker_client)
+ serie_containers.append(container)
+ return serie_containers
+
+def get_terminated_serie_status(running_containers):
+    """Check whether the containers in the list have terminated and collect exit codes."""
+ LOGGER.info("check terminated dockers")
+ exit_codes = []
+ exit_codes.clear()
+
+ for container in running_containers:
+ try:
+ # wait for the container to finish within a certain time
+ result = container.wait(timeout=60*ONAPTEST_BENCH_WAIT_TIMER)
+ exit_code = result["StatusCode"]
+ except Exception as timeout: # pylint: disable=broad-except
+            # if the container didn't finish in the allocated time,
+            # log the timeout and stop the container
+ LOGGER.error(timeout)
+ LOGGER.error("docker not terminating in allocated time")
+ container.stop()
+ exit_code = -1
+ LOGGER.info("exit code : %s", str(exit_code))
+ exit_codes.append(exit_code)
+ return exit_codes
+
+def generate_report():
+ """Build reporting."""
+ LOGGER.info("Generate the report")
+ test = Reporting.OnaptestBenchReporting(
+ nb_simultaneous_tests=ARGS.simu,
+ duration=ARGS.duration,
+ res_dir_path=ARGS.reporting,
+ reporting_dir=ARGS.reporting)
+ test.generate_reporting()
+
+def main():
+ """Entry point"""
+ # ***************************************************************************
+ # ***************************************************************************
+ # start of the test
+ # ***************************************************************************
+ # ***************************************************************************
+ test_client = docker.from_env()
+ serie_containers = []
+ exit_codes = []
+
+ prepare_test_config()
+
+ t_end = time.time() + 60 * float(ARGS.duration)
+
+    # previous containers that are no longer used are cleaned before each serie to avoid saturation
+
+
+ LOGGER.info("****************************")
+ LOGGER.info("Launch the tests")
+ LOGGER.info("Testcase: %s", ARGS.test)
+    LOGGER.info("Number of simultaneous tests: %s", ARGS.simu)
+    LOGGER.info("Test duration: %s m", ARGS.duration)
+    LOGGER.info("Reporting path: %s", ARGS.reporting)
+ LOGGER.info("****************************")
+
+ try:
+        # keep on launching series until we reach the duration expected by the tester
+ serie_number = 1
+ while time.time() < t_end:
+ clean_test_device(test_client, ARGS.test)
+ LOGGER.info("Serie : %s", str(serie_number))
+ serie_containers.clear()
+ # launch the serie
+ serie_containers = launch_test_serie(
+ serie_number,
+ test_client,
+ serie_containers)
+ LOGGER.info("Containers of serie %s created", str(serie_number))
+ exit_codes = get_terminated_serie_status(serie_containers)
+ LOGGER.info("Serie terminated")
+ LOGGER.debug(exit_codes)
+ remaining_time = int(t_end - time.time())
+ if remaining_time > 0:
+ LOGGER.info("%s s remaining, restart a serie...", remaining_time)
+ serie_number += 1
+
+ except Exception as error: # pylint: disable=broad-except
+ LOGGER.error(error)
+ LOGGER.error(">>>> Onaptests_bench FAIL")
+ LOGGER.error("do you have the correct env file?")
+        LOGGER.error("do you have the correct cluster IP?")
+ sys.exit(1)
+
+ else:
+ LOGGER.info(">>>> Onaptests_bench successfully executed")
+
+ finally:
+ generate_report()
diff --git a/test/onaptests_bench/src/onaptests_bench/reporting.py b/test/onaptests_bench/src/onaptests_bench/reporting.py
new file mode 100644
index 0000000..f464659
--- /dev/null
+++ b/test/onaptests_bench/src/onaptests_bench/reporting.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python3
+
+# ============LICENSE_START=======================================================
+# Copyright (C) 2022 Orange, Ltd.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+"""
+Aggregate test results
+"""
+import logging
+import os
+import re
+
+from dataclasses import dataclass
+from datetime import datetime
+import matplotlib.pyplot as plt # pylint: disable=import-error
+
+from jinja2 import Environment, select_autoescape, PackageLoader # pylint: disable=import-error
+
+# Logger
+LOG_LEVEL = 'INFO'
+logging.basicConfig()
+LOGGER = logging.getLogger("onaptests_bench")
+LOGGER.setLevel(LOG_LEVEL)
+
+RESULT_DIR_PATH = "/tmp/mytest"
+RESULT_LOG_FILE = "xtesting.log"
+RESULT_LOG_REPORTING_FILE = "reporting.html"
+FIGURE_NAME = "mygraph.png"
+USE_CASE_NAME = "unknown"  # could be checked with result parsing
+TIMEOUT_RUN = 1200 # parameter to be provided by the launcher
+TEST_DURATION = 120 # parameter to be provided by the launcher
+NB_SIMULTANEOUS_TESTS = 10 # parameter to be provided by the launcher
+REPORTING_DIR = "/tmp/"
+
+@dataclass
+class TestResult:
+ """Test results retrieved from xtesting."""
+ case_name: str
+ status: str = "FAIL"
+ start_date: datetime = "2000-01-01 00:00:01,123"
+ duration: int = 0
+
+@dataclass
+class SerieResult:
+    """A series of tests."""
+ serie_id: str
+ success_rate: int = 0
+ min: int = 0
+ max: int = 0
+ mean: float = 0.0
+ median: float = 0.0
+ nb_occurences: int = 0
+
+class OnaptestBenchReporting:
+ """Build html summary page."""
+
+ def __init__(self, nb_simultaneous_tests=NB_SIMULTANEOUS_TESTS,
+ duration=TEST_DURATION,
+ res_dir_path=RESULT_DIR_PATH,
+ reporting_dir=REPORTING_DIR) -> None:
+ """Initialization of the report."""
+ self._case_name = USE_CASE_NAME
+ self._nb_simultaneous_tests = nb_simultaneous_tests
+ self._test_duration = duration
+ self._result_dir_path = res_dir_path
+ self._reporting_dir = reporting_dir
+
+ def parse_xtesting_results(self, file_result):
+        """Retrieve data from an xtesting log file."""
+ # we need to retrieve:
+ # (- the name)
+ # - the start date
+ # - the status
+ # - the duration
+        # note: data could be fetched from the DB but let's aggregate based on
+        # the log to avoid a dependency on the DB
+ # 2021-01-22 07:01:58,467 - xtesting.ci.run_tests - INFO - Test result:
+ #
+ # +------------------------+---------------------+------------------+----------------+
+ # | TEST CASE | PROJECT | DURATION | RESULT |
+ # +------------------------+---------------------+------------------+----------------+
+ # | basic_onboard | integration | 19:53 | PASS |
+ # +------------------------+---------------------+------------------+----------------+
+ #
+ # 2021-01-22 07:01:58 - xtesting.ci.run_tests - INFO - Execution exit value: Result.EX_OK
+ start_date = ""
+ case_name = ""
+ duration = TIMEOUT_RUN
+ status = 0
+ with open(file_result) as xtesting_result:
+ for cnt, line in enumerate(xtesting_result):
+ LOGGER.debug(cnt)
+
+ if "Running test case" in line:
+ start_date = line.split()[0] + " " + line.split()[1]
+                    case_name = (re.search('\'(.*)\'', line)).group(1)
+                    self._case_name = case_name
+
+                # if the test ends properly, overwrite the start time with the end time
+ # for a better display
+ if "Execution exit value" in line:
+ start_date = line.split()[0] + " " + line.split()[1]
+
+ # Look for the result table
+ if "|" in line and self._case_name in line:
+ duration_str = line.split()[5]
+ duration = int(
+ duration_str.split(":")[0])*60 + int(
+ duration_str.split(":")[1])
+ if line.split()[7] == "PASS":
+ status = 100
+ else:
+ status = 0
+
+ testresult = TestResult(
+ case_name=case_name,
+ status=status,
+ start_date=datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S,%f'),
+ duration=duration)
+ return testresult
+
+ @staticmethod
+ def calculate_stats(durations):
+        """From the duration results, retrieve the min, max, mean and median values."""
+
+ min_val = min(durations)
+ max_val = max(durations)
+
+        # Mean
+        LOGGER.debug(durations)
+        mean_val = sum(durations) / len(durations)
+
+        # Median
+        lst = sorted(durations)
+        lst_len = len(lst)
+        index = (lst_len - 1) // 2
+        if lst_len % 2:
+            median_val = lst[index]
+        else:
+            median_val = (lst[index] + lst[index + 1]) / 2.0
+
+ return min_val, max_val, mean_val, median_val
+
+ @staticmethod
+ def calculate_success_rate(criterias):
+        """Calculate the success rate of a serie."""
+ # calculate success rate
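+        # each criteria is either 0 (FAIL) or 100 (PASS), so the average
+        # directly gives a success percentage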
+ score = 0
+ for criteria in criterias:
+ score += criteria
+ try:
+ rate = score/len(criterias)
+ except ZeroDivisionError:
+ rate = 0
+ return rate
+
+
+ def parse_serie_durations(self): # pylint: disable=too-many-locals
+ """Find result series."""
+        # from the results directory, find all the subdirectories and build an array of results
+ series = []
+ serie_names = []
+ serie_durations = {}
+ serie_criteria = {}
+
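+        # results are expected under <res_dir_path>/serie<N>/test<M>/xtesting.log,
+        # matching the per-test volume_reporting layout created by launcher.py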
+ for root, dirs, files in os.walk(self._result_dir_path):
+ try:
+ dirs.sort(key=lambda x: int(x.split("/")[-1][5:]))
+ except ValueError:
+ LOGGER.debug("sort only what is sortable")
+
+ LOGGER.debug("Root: %s, Dirs: %s, Files: %s", root, dirs, files)
+
+ for name in files:
+ if name == RESULT_LOG_FILE:
+ serie_name = root.split("/")[-2]
+ # if new serie detected, initialize it
+ if serie_name not in serie_names:
+ serie_names.append(serie_name)
+ serie_durations[serie_name] = []
+ serie_criteria[serie_name] = []
+ serie_raw_results = self.parse_xtesting_results(
+ root + "/" + RESULT_LOG_FILE)
+ serie_durations[serie_name].append(
+ serie_raw_results.duration)
+ serie_criteria[serie_name].append(
+ serie_raw_results.status)
+ for serie in serie_names:
+ LOGGER.info("Calculate stats and success rate of serie %s", serie)
+ LOGGER.debug(serie_durations[serie])
+ LOGGER.debug(serie_criteria[serie])
+ # calculate stats
+ min_val, max_val, mean_val, med_val = self.calculate_stats(
+ serie_durations[serie])
+ success_rate = self.calculate_success_rate(
+ serie_criteria[serie])
+ series.append(SerieResult(
+ serie_id=serie,
+ min=min_val,
+ max=max_val,
+ mean=mean_val,
+ median=med_val,
+ success_rate=success_rate,
+ nb_occurences=len(serie_durations[serie])))
+
+ return series
+
+ def create_duration_time_serie(self):
+        """Create histogram and scatter figures."""
+ # duration,success = f(time)
+ x_array_pass = []
+ x_array_fail = []
+ y_array_pass = []
+ y_array_fail = []
+ for root, dirs, files in os.walk(self._result_dir_path):
+ LOGGER.debug("Root: %s, Dirs: %s, Files: %s", root, dirs, files)
+ for name in files:
+ if name == RESULT_LOG_FILE:
+ serie_raw_results = self.parse_xtesting_results(
+ root + "/" + RESULT_LOG_FILE)
+ LOGGER.debug("Date %s", serie_raw_results.start_date)
+ LOGGER.debug("Status %s", serie_raw_results.status)
+ LOGGER.debug("Duration %s", serie_raw_results.duration)
+ # x_array.append(serie_raw_results.start_date)
+ if serie_raw_results.status < 100:
+ y_array_fail.append(serie_raw_results.duration)
+ x_array_fail.append(serie_raw_results.start_date)
+ else:
+ y_array_pass.append(serie_raw_results.duration)
+ x_array_pass.append(serie_raw_results.start_date)
+ plt.scatter(x_array_pass, y_array_pass, color='blue', label='PASS')
+ plt.scatter(x_array_fail, y_array_fail, color='red', label='FAIL')
+ plt.xlabel("time")
+ plt.ylabel("Duration of the test (s)")
+ plt.legend()
+ plt.savefig(self._reporting_dir + FIGURE_NAME)
+ plt.close()
+
+        # Create histogram
+ plt.hist(y_array_pass)
+ plt.xlabel("Duration of the test")
+ plt.ylabel("Number of tests")
+ plt.savefig(self._reporting_dir + "histo_" + FIGURE_NAME)
+ plt.close()
+
+ def create_success_rate(self, series_bench):
+ """Draw success rate = f(serie ID)"""
+        # Create a visualization of the success rate
+ # success_rate = f(time)
+ x_array_success_rate = []
+ y_array_success_rate = []
+
+ for serie in series_bench:
+ x_array_success_rate.append(serie.serie_id)
+ y_array_success_rate.append(int(serie.success_rate))
+ LOGGER.info(" Success rate vector: %s", y_array_success_rate)
+ plt.bar(range(len(y_array_success_rate)),
+ y_array_success_rate,
+ width=0.5,
+ color='blue')
+ # plt.plot(x_array_success_rate, y_array_success_rate, '-o', color='orange')
+ plt.xlabel("Series")
+ plt.ylabel("Success rate (%)")
+ plt.savefig(self._reporting_dir + "bar_" + FIGURE_NAME)
+ plt.close()
+
+ def create_cumulated_success_rate(self, series_bench):
+ """Draw success rate = f(nb executed tests)"""
+ # Create success_rate=f(nb test executed)
+ x_array_cumulated_success_rate = []
+ y_array_cumulated_success_rate = []
+ nb_test = 0
+ nb_success_test = 0
+ for serie in series_bench:
+ # calculate the number of tests
+ nb_test += self._nb_simultaneous_tests
+ # recalculate success rate
+ nb_success_test += int(serie.success_rate)*self._nb_simultaneous_tests
+ success_rate = nb_success_test / nb_test
+ x_array_cumulated_success_rate.append(nb_test)
+ y_array_cumulated_success_rate.append(success_rate)
+ plt.plot(
+ x_array_cumulated_success_rate,
+ y_array_cumulated_success_rate,
+ '-o', color='blue')
+ plt.xlabel("Nb of executed tests")
+ plt.ylabel("Success rate (%)")
+ plt.savefig(self._reporting_dir + "rate_" + FIGURE_NAME)
+ plt.close()
+
+
+ def generate_reporting(self):
+ """Generate Serie reporting."""
+ series_bench = self.parse_serie_durations()
+ LOGGER.info(series_bench)
+
+ # create html page
+ jinja_env = Environment(
+ autoescape=select_autoescape(['html']),
+ loader=PackageLoader('onaptests_bench'))
+
+ page_info = {}
+ page_info['usecase_name'] = self._case_name
+ page_info['nb_series'] = str(len(series_bench))
+ page_info['nb_simu_tests'] = str(self._nb_simultaneous_tests)
+ page_info['test_duration'] = self._test_duration
+ page_info['nb_tests'] = self._nb_simultaneous_tests * len(series_bench)
+ success_rate_vector = []
+ min_durations = []
+ max_durations = []
+ mean_durations = []
+
+ for serie in series_bench:
+ success_rate_vector.append(int(serie.success_rate))
+ min_durations.append(int(serie.min))
+ max_durations.append(int(serie.max))
+ mean_durations.append(int(serie.mean))
+
+ page_info['global_success_rate'] = int(self.calculate_success_rate(
+ success_rate_vector))
+ page_info['min_duration'] = min(min_durations)
+ page_info['max_duration'] = max(max_durations)
+ page_info['mean_duration'] = int(
+ self.calculate_success_rate(mean_durations))
+ jinja_env.get_template(
+ 'onaptests_bench.html.j2').stream(
+ info=page_info,
+ data=series_bench).dump(
+ '{}/onaptests_bench.html'.format(self._reporting_dir))
+
+ self.create_duration_time_serie()
+ self.create_success_rate(series_bench)
+ self.create_cumulated_success_rate(series_bench)
diff --git a/test/onaptests_bench/src/onaptests_bench/templates/base.html.j2 b/test/onaptests_bench/src/onaptests_bench/templates/base.html.j2
new file mode 100644
index 0000000..cbb4e44
--- /dev/null
+++ b/test/onaptests_bench/src/onaptests_bench/templates/base.html.j2
@@ -0,0 +1,231 @@
+{% macro color(failing, total) %}
+{% if failing == 0 %}
+is-success
+{% else %}
+{% if (failing / total) <= 0.1 %}
+is-warning
+{% else %}
+is-danger
+{% endif %}
+{% endif %}
+{% endmacro %}
+
+{% macro percentage(failing, total) %}
+{{ (((total - failing) / total) * 100) | round }}
+{% endmacro %}
+
+{% macro statistic(resource_name, failing, total) %}
+{% set success = total - failing %}
+<div class="level-item has-text-centered">
+ <div>
+ <p class="heading">{{ resource_name | capitalize }}</p>
+ <p class="title">{{ success }}/{{ total }}</p>
+ <progress class="progress {{ color(failing, total) }}" value="{{ success }}" max="{{ total }}">{{ percentage(failing, total) }}</progress>
+ </div>
+ </div>
+{% endmacro %}
+
+{% macro pods_table(pods) %}
+<div id="pods" class="table-container">
+ <table class="table is-fullwidth is-striped is-hoverable">
+ <thead>
+ <tr>
+ <th>Name</th>
+ <th>Ready</th>
+ <th>Status</th>
+ <th>Reason</th>
+ <th>Restarts</th>
+ </tr>
+ </thead>
+ <tbody>
+ {% for pod in pods %}
+ <tr>
+ <td><a href="./pod-{{ pod.name }}.html" title="{{ pod.name }}">{{ pod.k8s.metadata.name }}</a></td>
+ {% if pod.init_done %}
+ <td>{{ pod.running_containers }}/{{ (pod.containers | length) }}</td>
+ {% else %}
+ <td>Init:{{ pod.runned_init_containers }}/{{ (pod.init_containers | length) }}</td>
+ {% endif %}
+ <td>{{ pod.k8s.status.phase }}</td>
+ <td>{{ pod.k8s.status.reason }}</td>
+ {% if pod.init_done %}
+ <td>{{ pod.restart_count }}</td>
+ {% else %}
+ <td>{{ pod.init_restart_count }}</td>
+ {% endif %}
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+</div>
+{% endmacro %}
+
+{% macro key_value_description_list(title, dict) %}
+<dt><strong>{{ title | capitalize }}:</strong></dt>
+<dd>
+ {% if dict %}
+ {% for key, value in dict.items() %}
+ {% if loop.first %}
+ <dl>
+ {% endif %}
+ <dt>{{ key }}:</dt>
+ <dd>{{ value }}</dd>
+ {% if loop.last %}
+ </dl>
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+</dd>
+{% endmacro %}
+
+{% macro description(k8s) %}
+<div class="container">
+ <h1 class="title is-1">Description</h1>
+ <div class="content">
+ <dl>
+ {% if k8s.spec.type %}
+ <dt><strong>Type:</strong></dt>
+ <dd>{{ k8s.spec.type }}</dd>
+ {% if (k8s.spec.type | lower) == "clusterip" %}
+ <dt><strong>Headless:</strong></dt>
+ <dd>{% if (k8s.spec.cluster_ip | lower) == "none" %}Yes{% else %}No{% endif %}</dd>
+ {% endif %}
+ {% endif %}
+ {{ key_value_description_list('Labels', k8s.metadata.labels) | indent(width=6) }}
+ {{ key_value_description_list('Annotations', k8s.metadata.annotations) | indent(width=6) }}
+ {% if k8s.spec.selector %}
+ {% if k8s.spec.selector.match_labels %}
+ {{ key_value_description_list('Selector', k8s.spec.selector.match_labels) | indent(width=6) }}
+ {% else %}
+ {{ key_value_description_list('Selector', k8s.spec.selector) | indent(width=6) }}
+ {% endif %}
+ {% endif %}
+ {% if k8s.phase %}
+ <dt><strong>Status:</strong></dt>
+ <dd>{{ k8s.phase }}</dd>
+ {% endif %}
+ {% if k8s.metadata.owner_references %}
+ <dt><strong>Controlled By:</strong></dt>
+ <dd>{{ k8s.metadata.owner_references[0].kind }}/{{ k8s.metadata.owner_references[0].name }}</dd>
+ {% endif %}
+ </dl>
+ </div>
+</div>
+{% endmacro %}
+
+{% macro pods_container(pods, parent, has_title=True) %}
+<div class="container">
+ {% if has_title %}
+ <h1 class="title is-1">Pods</h1>
+ {% endif %}
+ {% if (pods | length) > 0 %}
+ {{ pods_table(pods) | indent(width=2) }}
+ {% else %}
+ <div class="notification is-warning">{{ parent }} has no pods!</div>
+ {% endif %}
+</div>
+{% endmacro %}
+
+{% macro two_level_breadcrumb(title, name) %}
+<section class="section">
+ <div class="container">
+ <nav class="breadcrumb" aria-label="breadcrumbs">
+ <ul>
+ <li><a href="./index.html">Summary</a></li>
+ <li class="is-active"><a href="#" aria-current="page">{{ title | capitalize }} {{ name }}</a></li>
+ </ul>
+ </nav>
+ </div>
+</section>
+{% endmacro %}
+
+{% macro pod_parent_summary(title, name, failed_pods, pods) %}
+{{ summary(title, name, [{'title': 'Pod', 'failing': failed_pods, 'total': (pods | length)}]) }}
+{% endmacro %}
+
+{% macro number_ok(number, none_value, total=None) %}
+{% if number %}
+{% if total and number < total %}
+<span class="tag is-warning">{{ number }}</span>
+{% else %}
+{{ number }}
+{% endif %}
+{% else %}
+<span class="tag is-warning">{{ none_value }}</span>
+{% endif %}
+{% endmacro %}
+
+{% macro summary(title, name, statistics) %}
+<section class="hero is-light">
+ <div class="hero-body">
+ <div class="container">
+ <h1 class="title is-1">
+ {{ title | capitalize }} {{ name }} Summary
+ </h1>
+ <nav class="level">
+ {% for stat in statistics %}
+ {% if stat.total > 0 %}
+ {{ statistic(stat.title, stat.failing, stat.total) | indent(width=8) }}
+ {% endif %}
+ {% endfor %}
+ </nav>
+ </div>
+ </div>
+</section>
+{% endmacro %}
+
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <title>Tests results - {% block title %}{% endblock %}</title>
+ <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bulma@0.9.0/css/bulma.min.css">
+ <script defer src="https://use.fontawesome.com/releases/v5.3.1/js/all.js"></script>
+ {% block more_head %}{% endblock %}
+ </head>
+ <body>
+ <nav class="navbar" role="navigation" aria-label="main navigation">
+ <div class="navbar-brand">
+ <a class="navbar-item" href="https://www.onap.org">
+ <img src="https://www.onap.org/wp-content/uploads/sites/20/2017/02/logo_onap_2017.png" width="234" height="50">
+ </a>
+
+ <a role="button" class="navbar-burger burger" aria-label="menu" aria-expanded="false" data-target="navbarBasicExample">
+ <span aria-hidden="true"></span>
+ <span aria-hidden="true"></span>
+ <span aria-hidden="true"></span>
+ </a>
+ </div>
+
+ <div id="navbarBasicExample" class="navbar-menu">
+ <div class="navbar-start">
+ <a class="navbar-item">
+ Summary
+ </a>
+ </div>
+ </div>
+ </nav>
+
+ {% block content %}{% endblock %}
+
+ <footer class="footer">
+ <div class="container">
+ <div class="columns">
+ <div class="column">
+ <p class="has-text-grey-light">
+ <a href="https://bulma.io/made-with-bulma/">
+ <img src="https://bulma.io/images/made-with-bulma.png" alt="Made with Bulma" width="128" height="24">
+              </a>
+            </p>
+          </div>
+          <div class="column">
+            <a class="has-text-grey" href="https://gitlab.com/Orange-OpenSource/lfn/tools/kubernetes-status" style="border-bottom: 1px solid currentColor;">
+              Improve this page on Gitlab
+            </a>
+ </div>
+ </div>
+ </div>
+ </footer>
+ </body>
+</html>
diff --git a/test/onaptests_bench/src/onaptests_bench/templates/onaptests_bench.html.j2 b/test/onaptests_bench/src/onaptests_bench/templates/onaptests_bench.html.j2
new file mode 100644
index 0000000..154bed2
--- /dev/null
+++ b/test/onaptests_bench/src/onaptests_bench/templates/onaptests_bench.html.j2
@@ -0,0 +1,79 @@
+{% extends "base.html.j2" %}
+{% block title %}ONAPTEST Bench{% endblock %}
+
+{% block content %}
+<h1 class="title is-1">ONAPTEST Bench</h1>
+<section class="section">
+ <div class="container">
+ <h3 class="subtitle">{{ info.usecase_name }}</h3>
+
+ <div class="block">
+ <div class="box">
+ Number of tests: {{ info.nb_tests }} <br>
+ Global success rate: {{ info.global_success_rate }} % <br>
+ Number of simultaneous tests: {{ info.nb_simu_tests }} <br>
+ Test duration: {{ info.test_duration }} m <br>
+ Number of executed series: {{ info.nb_series }} <br>
+      Min duration: {{ info.min_duration }} s<br>
+      Max duration: {{ info.max_duration }} s<br>
+      Mean duration: {{ info.mean_duration }} s<br>
+ </div>
+</div>
+
+<div class="columns">
+ <div class="column">
+ <figure class="image">
+ <img src="./rate_mygraph.png">
+ </figure>
+ </div>
+ <div class="column">
+ <figure class="image">
+ <img src="./bar_mygraph.png">
+ </figure>
+ </div>
+ <div class="column">
+ <figure class="image">
+ <img src="./mygraph.png">
+ </figure>
+ </div>
+ <div class="column">
+ <figure class="image">
+ <img src="./histo_mygraph.png">
+ </figure>
+ </div>
+</div>
+
+<table class="table is-bordered is-striped is-narrow is-hoverable is-fullwidth">
+ <thead>
+ <tr>
+ <th><center>Serie</center></th>
+ <th><center>Success Rate</center></th>
+      <th><center>Min (s)</center></th>
+      <th><center>Max (s)</center></th>
+      <th><center>Mean (s)</center></th>
+      <th><center>Median (s)</center></th>
+ </tr>
+ </thead>
+
+ <tbody>
+
+ {% for serie in data %}
+ <tr {% if serie.success_rate >= 80 %} class="has-background-success-light" {%elif serie.success_rate > 0 %} class="has-background-warning-light" {% else %} class="has-background-danger-light" {% endif %}>
+ <td><center>{{ serie.serie_id }}</center></td>
+ <td><center>{{ serie.success_rate }}%</center></td>
+ <td><center>{{ serie.min }}</center></td>
+ <td><center>{{ serie.max }}</center></td>
+ <td><center>{{ serie.mean }}</center></td>
+ <td><center>{{ serie.median }}</center></td>
+    </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+
+</div>
+
+</section>
+
+{% endblock %}
diff --git a/test/onaptests_bench/test-requirements.txt b/test/onaptests_bench/test-requirements.txt
new file mode 100644
index 0000000..a0679b7
--- /dev/null
+++ b/test/onaptests_bench/test-requirements.txt
@@ -0,0 +1,6 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+coverage!=4.4,>=4.0 # Apache-2.0
+flake8 # MIT
+pylint # GPLv2
diff --git a/test/onaptests_bench/tox.ini b/test/onaptests_bench/tox.ini
new file mode 100644
index 0000000..9745d4f
--- /dev/null
+++ b/test/onaptests_bench/tox.ini
@@ -0,0 +1,15 @@
+[tox]
+envlist = py3, pylint
+
+[testenv]
+deps =
+ -r{toxinidir}/requirements.txt
+
+[testenv:py3]
+commands = python {toxinidir}/setup.py develop
+
+[testenv:pylint]
+deps =
+ -r{toxinidir}/test-requirements.txt
+
+commands = pylint src
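+
+# "tox -e py3" installs the package in develop mode,
+# "tox -e pylint" lints the sources.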