Added tests and improvements

Real message router and Kafka now work in Docker and in Kubernetes
Added tests of DMaaP Adapter Kafka jobs
Added possibility to collect runtime statistics of pods/containers
Improved callback receiver to handle large payloads
Various simplifications, improvements and corrections

Issue-ID: NONRTRIC-618

Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: I397b4842bf860a3126cc57ddcef61bd8db3aa76b
diff --git a/test/auto-test/FTC1.sh b/test/auto-test/FTC1.sh
index 5d718b0..e4ffe75 100755
--- a/test/auto-test/FTC1.sh
+++ b/test/auto-test/FTC1.sh
@@ -24,7 +24,7 @@
 DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY "
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
 KUBE_PRESTARTED_IMAGES=""
 
@@ -119,7 +119,8 @@
             start_ric_simulators ricsim_g3 1  STD_2.0.0
         fi
 
-        start_mr
+        start_mr    "$MR_READ_TOPIC"  "/events" "users/policy-agent" \
+                    "$MR_WRITE_TOPIC" "/events" "users/mr-stub"
 
         start_cr
 
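Note on the updated start_mr calls in this change: they now pass one or more topic descriptors instead of no arguments. A minimal sketch of the pattern, assuming the arguments form repeated triples of <topic-name> <base-path> <producer-or-consumer-path> (inferred from the calls in this change, not from start_mr's own documentation):

start_mr    "$MR_READ_TOPIC"  "/events" "users/policy-agent" \
            "$MR_WRITE_TOPIC" "/events" "users/mr-stub" \
            "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs"   # extra topic triple, illustrative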
diff --git a/test/auto-test/FTC3000.sh b/test/auto-test/FTC3000.sh
index da4bf1e..4c261b4 100755
--- a/test/auto-test/FTC3000.sh
+++ b/test/auto-test/FTC3000.sh
@@ -20,10 +20,10 @@
 TC_ONELINE_DESCR="App test DMAAP Meditor and DMAAP Adapter"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR"
 
 #App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR"
 
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
 KUBE_PRESTARTED_IMAGES=""
@@ -81,7 +81,9 @@
 
 set_ecs_trace
 
-start_mr
+start_mr    "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages" \
+            "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
+            "unauthenticated.dmaapadp_kafka.text" "/events" "dmaapadapterproducer/msgs"
 
 start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE
 
@@ -93,23 +95,33 @@
 
 # Check producers
 ecs_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY
-ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages
+ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages ExampleInformationTypeKafka
 ecs_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer DMaaP_Mediator_Producer
 
 
-# Create jobs for adapter
+# Create jobs for adapter - CR stores data as MD5 hash
 start_timer "Create adapter jobs: $NUM_JOBS"
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i info-owner-adp-$i $CR_SERVICE_MR_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+    ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i"?storeas=md5" info-owner-adp-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+
 done
 print_timer "Create adapter jobs: $NUM_JOBS"
 
-# Create jobs for mediator
+# Create jobs for adapter kafka - CR stores data as MD5 hash
+start_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_idc_put_job 201 job-adp-kafka-$i ExampleInformationTypeKafka $CR_SERVICE_TEXT_PATH/job-adp-kafka-data$i"?storeas=md5" info-owner-adp-kafka-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-kafka-$i testdata/dmaap-adapter/job-template-1-kafka.json
+
+done
+print_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+
+# Create jobs for mediator - CR stores data as MD5 hash
 start_timer "Create mediator jobs: $NUM_JOBS"
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i info-owner-med-$i $CR_SERVICE_MR_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
+    ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i"?storeas=md5" info-owner-med-$i $CR_SERVICE_APP_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
 done
 print_timer "Create mediator jobs: $NUM_JOBS"
 
@@ -118,11 +130,117 @@
 do
     ecs_api_a1_get_job_status 200 job-med-$i ENABLED 30
     ecs_api_a1_get_job_status 200 job-adp-$i ENABLED 30
+    ecs_api_a1_get_job_status 200 job-adp-kafka-$i ENABLED 30
 done
 
+
 EXPECTED_DATA_DELIV=0
 
-# Send data to adapter via mr
+mr_api_generate_json_payload_file 1 ./tmp/data_for_dmaap_test.json
+mr_api_generate_text_payload_file 1 ./tmp/data_for_dmaap_test.txt
+
+## Send json file via message-router to adapter
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from adapter
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+done
+
+
+## Send text file via message-router to adapter kafka
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from adapter kafka
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+done
+
+## Send json file via message-router to mediator
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from mediator
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+done
+
+
+# Send small json via message-router to adapter
 mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
 mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-3"}'
 
@@ -131,9 +249,18 @@
 start_timer "Data delivery adapter, 2 json per job"
 cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
 print_timer "Data delivery adapter, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
 
-# Send data to mediator
+# Send small text via message-router to adapter kafka
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------1'
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------3'
+
+# Wait for data reception, adapter kafka
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapte kafkar, 2 strings per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
+print_timer "Data delivery adapte kafkar, 2 strings per job"
+
+# Send small json via message-router to mediator
 mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-0"}'
 mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-2"}'
 
@@ -142,73 +269,85 @@
 start_timer "Data delivery mediator, 2 json per job"
 cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
 print_timer "Data delivery mediator, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
 
 # Check received number of messages for mediator and adapter callbacks
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    cr_equal received_callbacks?id=job-med-data$i 2
-    cr_equal received_callbacks?id=job-adp-data$i 2
+    cr_equal received_callbacks?id=job-med-data$i 7
+    cr_equal received_callbacks?id=job-adp-data$i 7
+    cr_equal received_callbacks?id=job-adp-kafka-data$i 7
 done
 
 # Check received data and order for mediator and adapter callbacks
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-0"}'
-    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-2"}'
-    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-1"}'
-    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-3"}'
+    cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-0"}'
+    cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-2"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-1"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-3"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------1'
+    cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------3'
 done
 
 # Set delay in the callback receiver to slow down callbacks
-SEC_DELAY=5
+SEC_DELAY=2
 cr_delay_callback 200 $SEC_DELAY
 
-# Send data to adapter via mr
+# Send small json via message-router to adapter
 mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-5"}'
 mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-7"}'
 
 # Wait for data recetption, adapter
 EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
-start_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
+start_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job"
 cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
-print_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+print_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job"
 
 
-# Send data to mediator
+# Send small text via message-router to adapter kafka
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------5'
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------7'
+
+# Wait for data reception, adapter kafka
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapter kafka with $SEC_DELAY seconds delay in consumer, 2 strings per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
+print_timer "Data delivery adapter with kafka $SEC_DELAY seconds delay in consumer, 2 strings per job"
+
+
+# Send small json via message-router to mediator
 mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-4"}'
 mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-6"}'
 
 # Wait for data reception, mediator
 EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
-start_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
+start_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job"
 cr_equal received_callbacks $EXPECTED_DATA_DELIV 1000
-print_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+print_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job"
 
 # Check received number of messages for mediator and adapter callbacks
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    cr_equal received_callbacks?id=job-med-data$i 4
-    cr_equal received_callbacks?id=job-adp-data$i 4
+    cr_equal received_callbacks?id=job-med-data$i 9
+    cr_equal received_callbacks?id=job-adp-data$i 9
+    cr_equal received_callbacks?id=job-adp-kafka-data$i 9
 done
 
 # Check received data and order for mediator and adapter callbacks
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-4"}'
-    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-6"}'
-    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-5"}'
-    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-7"}'
+    cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-4"}'
+    cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-6"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-5"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-7"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------5'
+    cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------7'
 done
 
-
-
 #### TEST COMPLETE ####
 
 store_logs          END
 
 print_result
 
-auto_clean_environment
\ No newline at end of file
+auto_clean_environment
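The FTC3000 flow above pairs the new "?storeas=md5" callback URLs with the new MD5-based check functions. A minimal sketch for a single adapter job, using the same helper functions as above (job id, owner and file names are illustrative):

# Create one job whose data callback asks CR to store the payload as an MD5 hash
ecs_api_idc_put_job 201 job-adp-1 ExampleInformationType \
    $CR_SERVICE_MR_PATH/job-adp-data1"?storeas=md5" info-owner-adp-1 \
    $CR_SERVICE_APP_PATH/job_status_info-owner-adp-1 testdata/dmaap-adapter/job-template.json

# Publish a json file to the adapter topic and verify CR received the MD5 of that payload
mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
cr_api_check_single_genric_event_md5_file 200 job-adp-data1 ./tmp/data_for_dmaap_test.json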
diff --git a/test/auto-test/ONAP_UC.sh b/test/auto-test/ONAP_UC.sh
index 15b5c5b..03697bc 100755
--- a/test/auto-test/ONAP_UC.sh
+++ b/test/auto-test/ONAP_UC.sh
@@ -23,7 +23,7 @@
 DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC KUBEPROXY NGW"
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
 KUBE_PRESTARTED_IMAGES=""
 
@@ -99,7 +99,8 @@
 
     start_ric_simulators $RIC_SIM_PREFIX"_g3" $STD_NUM_RICS STD_2.0.0
 
-    start_mr
+    start_mr    "$MR_READ_TOPIC"  "/events" "users/policy-agent" \
+                "$MR_WRITE_TOPIC" "/events" "users/mr-stub"
 
     start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
 
diff --git a/test/auto-test/startMR.sh b/test/auto-test/startMR.sh
index 47b4514..27bdb4e 100755
--- a/test/auto-test/startMR.sh
+++ b/test/auto-test/startMR.sh
@@ -56,7 +56,11 @@
 
 clean_environment
 start_kube_proxy
-start_mr
+start_mr    "$MR_READ_TOPIC"  "/events" "users/policy-agent" \
+            "$MR_WRITE_TOPIC" "/events" "users/mr-stub" \
+            "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
+            "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages"
+
 if [ $RUNMODE == "KUBE" ]; then
     :
 else
diff --git a/test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka b/test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka
new file mode 100644
index 0000000..290b70a
--- /dev/null
+++ b/test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka
@@ -0,0 +1,28 @@
+{
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "type": "object",
+  "properties": {
+    "filter": {
+      "type": "string"
+    },
+    "maxConcurrency": {
+      "type": "integer"
+    },
+    "bufferTimeout": {
+      "type": "object",
+      "properties": {
+        "maxSize": {
+          "type": "integer"
+        },
+        "maxTimeMiliseconds": {
+          "type": "integer"
+        }
+      },
+      "required": [
+        "maxSize",
+        "maxTimeMiliseconds"
+      ]
+    }
+  },
+  "required": []
+}
\ No newline at end of file
diff --git a/test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json b/test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json
new file mode 100644
index 0000000..d549397
--- /dev/null
+++ b/test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json
@@ -0,0 +1,7 @@
+{
+  "maxConcurrency": 1,
+  "bufferTimeout": {
+      "maxSize": 1,
+      "maxTimeMiliseconds": 0
+  }
+}
\ No newline at end of file
diff --git a/test/common/README.md b/test/common/README.md
index 18b9656..3577cfa 100644
--- a/test/common/README.md
+++ b/test/common/README.md
@@ -153,6 +153,7 @@
 | `--print-stats` |  Prints the number of tests, failed tests, failed configuration and deviations after each individual test or config |
 | `--override <file>` |  Override setting from the file supplied by --env-file |
 | `--pre-clean` |  Clean kube resouces when running docker and vice versa |
+| `--gen-stats`  | Collect container/pod runtime statistics |
 | `help` | Print this info along with the test script description and the list of app short names supported |
 
 ## Function: setup_testenvironment ##
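A hypothetical invocation showing where the new flag fits; the test script name, run mode and env-file argument are placeholders following the existing conventions, only --gen-stats is introduced by this change:

./FTC3000.sh docker --env-file <env-file> --gen-stats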
diff --git a/test/common/agent_api_functions.sh b/test/common/agent_api_functions.sh
index a1fd657..4cedad1 100644
--- a/test/common/agent_api_functions.sh
+++ b/test/common/agent_api_functions.sh
@@ -91,6 +91,19 @@
 	use_agent_rest_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PA_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "PA $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "PA $POLICY_AGENT_APP_NAME"
+	fi
+}
+
+
 #######################################################
 
 ###########################
diff --git a/test/common/api_curl.sh b/test/common/api_curl.sh
index 17f80a5..f2777eb 100644
--- a/test/common/api_curl.sh
+++ b/test/common/api_curl.sh
@@ -23,7 +23,8 @@
 # one for sending the requests and one for receiving the response
 # but only when using the DMAAP interface
 # REST or DMAAP is controlled of the base url of $XX_ADAPTER
-# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH <url>|<correlation-id> [<file>]) | (PA|ECS RESPONSE <correlation-id>)
+# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH <url>|<correlation-id> [<file> [mime-type]]) | (PA|ECS RESPONSE <correlation-id>)
+# Default mime type for file is application/json unless specified in parameter mime-type
 # (Not for test scripts)
 __do_curl_to_api() {
 	TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
@@ -39,6 +40,7 @@
 
 	paramError=0
 	input_url=$3
+	fname=$4
     if [ $# -gt 0 ]; then
         if [ $1 == "PA" ]; then
 			__ADAPTER=$PA_ADAPTER
@@ -75,17 +77,21 @@
 			__ADAPTER=$MR_STUB_ADAPTER
 			__ADAPTER_TYPE=$MR_STUB_ADAPTER_TYPE
             __RETRY_CODES=""
-        else
+        elif [ $1 == "DMAAPMR" ]; then
+			__ADAPTER=$MR_DMAAP_ADAPTER_HTTP
+			__ADAPTER_TYPE=$MR_DMAAP_ADAPTER_TYPE
+            __RETRY_CODES=""
+		else
             paramError=1
         fi
-		if [ $__ADAPTER_TYPE == "MR-HTTP" ]; then
+		if [ "$__ADAPTER_TYPE" == "MR-HTTP" ]; then
 			__ADAPTER=$MR_ADAPTER_HTTP
 		fi
-		if [ $__ADAPTER_TYPE == "MR-HTTPS" ]; then
+		if [ "$__ADAPTER_TYPE" == "MR-HTTPS" ]; then
 			__ADAPTER=$MR_ADAPTER_HTTPS
 		fi
     fi
-    if [ $# -lt 3 ] || [ $# -gt 4 ]; then
+    if [ $# -lt 3 ] || [ $# -gt 5 ]; then
 		paramError=1
     else
 		timeout=""
@@ -100,6 +106,10 @@
 		fi
 		if [ $# -gt 3 ]; then
 			content=" -H Content-Type:application/json"
+			fname=$4
+			if [ $# -gt 4 ]; then
+				content=" -H Content-Type:"$5
+			fi
 		fi
 		if [ $2 == "GET" ] || [ $2 == "GET_BATCH" ]; then
 			oper="GET"
@@ -108,15 +118,15 @@
 			fi
 		elif [ $2 == "PUT" ] || [ $2 == "PUT_BATCH" ]; then
 			oper="PUT"
-			if [ $# -eq 4 ]; then
-				file=" --data-binary @$4"
+			if [ $# -gt 3 ]; then
+				file=" --data-binary @$fname"
 			fi
 			accept=" -H accept:application/json"
 		elif [ $2 == "POST" ] || [ $2 == "POST_BATCH" ]; then
 			oper="POST"
 			accept=" -H accept:*/*"
-			if [ $# -eq 4 ]; then
-				file=" --data-binary @$4"
+			if [ $# -gt 3 ]; then
+				file=" --data-binary @$fname"
 				accept=" -H accept:application/json"
 			fi
 		elif [ $2 == "DELETE" ] || [ $2 == "DELETE_BATCH" ]; then
@@ -153,8 +163,8 @@
         oper=" -X "$oper
         curlString="curl -k $proxyflag "${oper}${timeout}${httpcode}${accept}${content}${url}${file}
         echo " CMD: "$curlString >> $HTTPLOG
-		if [ $# -eq 4 ]; then
-			echo " FILE: $(<$4)" >> $HTTPLOG
+		if [ $# -gt 3 ]; then
+			echo " FILE: $(<$fname)" >> $HTTPLOG
 		fi
 
 		# Do retry for configured response codes, otherwise only one attempt
@@ -190,12 +200,12 @@
     else
 		if [ $oper != "RESPONSE" ]; then
 			requestUrl=$input_url
-			if [ $2 == "PUT" ] && [ $# -eq 4 ]; then
-				payload="$(cat $4 | tr -d '\n' | tr -d ' ' )"
+			if [ $2 == "PUT" ] && [ $# -gt 3 ]; then
+				payload="$(cat $fname | tr -d '\n' | tr -d ' ' )"
 				echo "payload: "$payload >> $HTTPLOG
 				file=" --data-binary "$payload
-			elif [ $# -eq 4 ]; then
-				echo " FILE: $(cat $4)" >> $HTTPLOG
+			elif [ $# -gt 3 ]; then
+				echo " FILE: $(cat $fname)" >> $HTTPLOG
 			fi
 			#urlencode the request url since it will be carried by send-request url
 			requestUrl=$(python3 -c "from __future__ import print_function; import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))"  "$input_url")
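The optional mime-type argument added above defaults to application/json. A sketch of how a test function might post a plain-text file through the new DMAAPMR adapter, assuming the same return convention (body followed by a three-digit status code) used elsewhere in the test env; the topic is illustrative:

res="$(__do_curl_to_api DMAAPMR POST "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt "text/plain")"
status=${res:${#res}-3}   # last three characters hold the http status code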
diff --git a/test/common/consul_cbs_functions.sh b/test/common/consul_cbs_functions.sh
index 747eaab..cd1b16c 100644
--- a/test/common/consul_cbs_functions.sh
+++ b/test/common/consul_cbs_functions.sh
@@ -165,6 +165,21 @@
 	CBS_SERVICE_PATH="http://"$CBS_APP_NAME":"$CBS_INTERNAL_PORT
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CONSUL_statisics_setup() {
+	echo ""
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CBS_statisics_setup() {
+	echo ""
+}
 #######################################################
 
 
diff --git a/test/common/control_panel_api_functions.sh b/test/common/control_panel_api_functions.sh
index eda6fe3..295e16a 100644
--- a/test/common/control_panel_api_functions.sh
+++ b/test/common/control_panel_api_functions.sh
@@ -91,6 +91,19 @@
 __CP_initial_setup() {
 	use_control_panel_http
 }
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CP_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "CP $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "CP $CONTROL_PANEL_APP_NAME"
+	fi
+}
+
 #######################################################
 
 
diff --git a/test/common/controller_api_functions.sh b/test/common/controller_api_functions.sh
index 4027f30..b3ef07b 100644
--- a/test/common/controller_api_functions.sh
+++ b/test/common/controller_api_functions.sh
@@ -73,7 +73,7 @@
 # All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
 # This function is called for apps fully managed by the test script
 __SDNC_kube_scale_zero() {
-	__kube_scale_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
+	__kube_scale_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC
 }
 
 # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
@@ -85,7 +85,7 @@
 # Delete all kube resouces for the app
 # This function is called for apps managed by the test script.
 __SDNC_kube_delete_all() {
-	__kube_delete_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
+	__kube_delete_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC
 }
 
 # Store docker logs
@@ -93,9 +93,9 @@
 # args: <log-dir> <file-prexix>
 __SDNC_store_docker_logs() {
 	if [ $RUNMODE == "KUBE" ]; then
-		kubectl  logs -l "autotest=SDNC" -n $KUBE_SNDC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
-		podname=$(kubectl get pods -n $KUBE_SNDC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
-		kubectl exec -t -n $KUBE_SNDC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+		kubectl  logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
+		podname=$(kubectl get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
+		kubectl exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
 	else
 		docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
 	fi
@@ -108,6 +108,18 @@
 	use_sdnc_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__SDNC_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "SDNC $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE"
+	else
+		echo "SDNC $SDNC_APP_NAME"
+	fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to SDNC
@@ -135,8 +147,8 @@
 	SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME":"$2  # docker access, container->container and script->container via proxy
 	SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME":"$1$SDNC_API_URL
 	if [ $RUNMODE == "KUBE" ]; then
-		SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SNDC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
-		SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SNDC_NAMESPACE":"$1$SDNC_API_URL
+		SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SDNC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+		SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SDNC_NAMESPACE":"$1$SDNC_API_URL
 	fi
 	echo ""
 
@@ -145,7 +157,7 @@
 # Export env vars for config files, docker compose and kube resources
 # args:
 __sdnc_export_vars() {
-	export KUBE_SNDC_NAMESPACE
+	export KUBE_SDNC_NAMESPACE
 	export DOCKER_SIM_NWNAME
 
 	export SDNC_APP_NAME
@@ -199,7 +211,7 @@
 		if [ $retcode_p -eq 0 ]; then
 			echo -e " Using existing $SDNC_APP_NAME deployment and service"
 			echo " Setting SDNC replicas=1"
-			__kube_scale deployment $SDNC_APP_NAME $KUBE_SNDC_NAMESPACE 1
+			__kube_scale deployment $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE 1
 		fi
 
 				# Check if app shall be fully managed by the test script
@@ -208,7 +220,7 @@
 			echo -e " Creating $SDNC_APP_NAME app and expose service"
 
 			#Check if namespace exists, if not create it
-			__kube_create_namespace $KUBE_SNDC_NAMESPACE
+			__kube_create_namespace $KUBE_SDNC_NAMESPACE
 
 			__sdnc_export_vars
 
diff --git a/test/common/cr_api_functions.sh b/test/common/cr_api_functions.sh
index ba46510..a537bc8 100644
--- a/test/common/cr_api_functions.sh
+++ b/test/common/cr_api_functions.sh
@@ -107,6 +107,18 @@
 	use_cr_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CR_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "CR $CR_APP_NAME $KUBE_SIM_NAMESPACE"
+	else
+		echo "CR $CR_APP_NAME"
+	fi
+}
+
 #######################################################
 
 ################
@@ -142,6 +154,7 @@
 	fi
 	# Service paths are used in test script to provide callbacck urls to app
 	CR_SERVICE_MR_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_MR  #Only for messages from dmaap adapter/mediator
+	CR_SERVICE_TEXT_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_TEXT  #Callbacks for text payload
 	CR_SERVICE_APP_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK    #For general callbacks from apps
 
 	# CR_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
@@ -573,6 +586,10 @@
 	body=${res:0:${#res}-3}
 	targetJson=$3
 
+	if [ $targetJson == "EMPTY" ] && [ ${#body} -ne 0 ]; then
+		__log_test_fail_body
+		return 1
+	fi
 	echo " TARGET JSON: $targetJson" >> $HTTPLOG
 	res=$(python3 ../common/compare_json.py "$targetJson" "$body")
 
@@ -583,4 +600,124 @@
 
 	__log_test_pass
 	return 0
+}
+
+# CR API: Check a single (oldest) json in md5 format (or none if empty) for path.
+# Note that if a json message is given, it shall be compact - no whitespace except inside strings.
+# The MD5 hash will differ if whitespace is present in an otherwise equivalent json.
+# arg: <response-code> <topic-url> (EMPTY | <data-msg> )
+# (Function for test scripts)
+cr_api_check_single_genric_event_md5() {
+	__log_test_start $@
+
+	if [ $# -ne 3 ]; then
+		__print_err "<response-code> <topic-url> (EMPTY | <data-msg> )" $@
+		return 1
+	fi
+
+	query="/get-event/"$2
+	res="$(__do_curl_to_api CR GET $query)"
+	status=${res:${#res}-3}
+
+	if [ $status -ne $1 ]; then
+		__log_test_fail_status_code $1 $status
+		return 1
+	fi
+	body=${res:0:${#res}-3}
+	if [ $3 == "EMPTY" ]; then
+		if [ ${#body} -ne 0 ]; then
+			__log_test_fail_body
+			return 1
+		else
+			__log_test_pass
+			return 0
+		fi
+	fi
+	command -v md5 > /dev/null # Mac
+	if [ $? -eq 0 ]; then
+		targetMd5=$(echo -n "$3" | md5)
+	else
+		command -v md5sum > /dev/null # Linux
+		if [ $? -eq 0 ]; then
+			targetMd5=$(echo -n "$3" | md5sum | cut -d' ' -f 1)  # Need to cut additional info printed by cmd
+		else
+			__log_test_fail_general "Command md5 nor md5sum is available"
+			return 1
+		fi
+	fi
+	targetMd5="\""$targetMd5"\"" #Quotes needed
+
+	echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG
+
+	if [ "$body" != "$targetMd5" ]; then
+		__log_test_fail_body
+		return 1
+	fi
+
+	__log_test_pass
+	return 0
+}
+
+# CR API: Check a single (oldest) event in md5 format (or none if empty) for path.
+# Note that if a file with a json message is given, the json shall be compact - no whitespace except inside strings, and no newlines.
+# The MD5 hash will differ if whitespace/newlines are present in an otherwise equivalent json.
+# arg: <response-code> <topic-url> (EMPTY | <data-file> )
+# (Function for test scripts)
+cr_api_check_single_genric_event_md5_file() {
+	__log_test_start $@
+
+	if [ $# -ne 3 ]; then
+		__print_err "<response-code> <topic-url> (EMPTY | <data-file> )" $@
+		return 1
+	fi
+
+	query="/get-event/"$2
+	res="$(__do_curl_to_api CR GET $query)"
+	status=${res:${#res}-3}
+
+	if [ $status -ne $1 ]; then
+		__log_test_fail_status_code $1 $status
+		return 1
+	fi
+	body=${res:0:${#res}-3}
+	if [ $3 == "EMPTY" ]; then
+		if [ ${#body} -ne 0 ]; then
+			__log_test_fail_body
+			return 1
+		else
+			__log_test_pass
+			return 0
+		fi
+	fi
+
+	if [ ! -f $3 ]; then
+		__log_test_fail_general "File $3 does not exist"
+		return 1
+	fi
+
+	filedata=$(cat $3)
+
+	command -v md5 > /dev/null # Mac
+	if [ $? -eq 0 ]; then
+		targetMd5=$(echo -n "$filedata" | md5)
+	else
+		command -v md5sum > /dev/null # Linux
+		if [ $? -eq 0 ]; then
+			targetMd5=$(echo -n "$filedata" | md5sum | cut -d' ' -f 1)  # Need to cut additional info printed by cmd
+		else
+			__log_test_fail_general "Command md5 nor md5sum is available"
+			return 1
+		fi
+	fi
+	targetMd5="\""$targetMd5"\""   #Quotes needed
+
+	echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG
+
+	if [ "$body" != "$targetMd5" ]; then
+		__log_test_fail_body
+		return 1
+	fi
+
+	__log_test_pass
+	return 0
 }
\ No newline at end of file
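For reference, a condensed sketch of the hash comparison the two new CR check functions perform, mirroring the md5/md5sum fallback above (the payload value is illustrative):

payload='{"msg":"msg-1"}'                     # must be compact json - whitespace changes the hash
if command -v md5 > /dev/null; then           # Mac
    targetMd5=$(echo -n "$payload" | md5)
else                                          # Linux
    targetMd5=$(echo -n "$payload" | md5sum | cut -d' ' -f 1)
fi
targetMd5="\""$targetMd5"\""                  # CR returns the hash as a quoted json string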
diff --git a/test/common/dmaapadp_api_functions.sh b/test/common/dmaapadp_api_functions.sh
index 26da2d0..9b7571f 100644
--- a/test/common/dmaapadp_api_functions.sh
+++ b/test/common/dmaapadp_api_functions.sh
@@ -92,6 +92,18 @@
 	use_dmaapadp_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPADP_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "DMAAPADP $DMAAP_ADP_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "DMAAPADP $DMAAP_ADP_APP_NAME"
+	fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Dmaap adapter
diff --git a/test/common/dmaapmed_api_functions.sh b/test/common/dmaapmed_api_functions.sh
index 16e1ad7..5188a45 100644
--- a/test/common/dmaapmed_api_functions.sh
+++ b/test/common/dmaapmed_api_functions.sh
@@ -92,6 +92,18 @@
 	use_dmaapmed_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPMED_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "DMAAPMED $DMAAP_MED_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "DMAAPMED $DMAAP_MED_APP_NAME"
+	fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Dmaap mediator
diff --git a/test/common/ecs_api_functions.sh b/test/common/ecs_api_functions.sh
index 2b434f1..b28c061 100644
--- a/test/common/ecs_api_functions.sh
+++ b/test/common/ecs_api_functions.sh
@@ -91,6 +91,18 @@
 	use_ecs_rest_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__ECS_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "ECS $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "ECS $ECS_APP_NAME"
+	fi
+}
+
 #######################################################
 
 
diff --git a/test/common/gateway_api_functions.sh b/test/common/gateway_api_functions.sh
index ee617ef..d8f1707 100644
--- a/test/common/gateway_api_functions.sh
+++ b/test/common/gateway_api_functions.sh
@@ -92,6 +92,18 @@
 	use_gateway_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__NGW_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "NGW $NRT_GATEWAY_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "NGW $NRT_GATEWAY_APP_NAME"
+	fi
+}
+
 #######################################################
 
 
diff --git a/test/common/genstat.sh b/test/common/genstat.sh
new file mode 100755
index 0000000..3c329d9
--- /dev/null
+++ b/test/common/genstat.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This script collects container statistics and writes them to a file. Fields are separated by semicolons.
+# Works for both docker containers and kubernetes pods.
+# Relies on 'docker stats', so it will not work for other container runtimes.
+# Used by the test env.
+
+# args: DOCKER <start-time-seconds> <log-file> <app-short-name> <app-name> [ <app-short-name> <app-name> ]*
+# or
+# args: KUBE <start-time-seconds> <log-file> <app-short-name> <app-name> <namespace> [ <app-short-name> <app-name> <namespace> ]*
+
+print_usage() {
+  echo "Usage: genstat.sh DOCKER <start-time-seconds> <log-file> <app-short-name> <app-name> [ <app-short-name> <app-name> ]*"
+  echo "or"
+  echo "Usage: genstat.sh KUBE <start-time-seconds> <log-file> <app-short-name> <app-name> <namespace> [ <app-short-name> <app-name> <namespace> ]*"
+}
+
+STARTTIME=-1
+
+if [ $# -lt 4 ]; then
+  print_usage
+  exit 1
+fi
+if [ $1 == "DOCKER" ]; then
+  STAT_TYPE=$1
+  shift
+  STARTTIME=$1
+  shift
+  LOGFILE=$1
+  shift
+  if [ $(($#%2)) -ne 0 ]; then
+    print_usage
+    exit 1
+  fi
+elif [ $1 == "KUBE" ]; then
+  STAT_TYPE=$1
+  shift
+  STARTTIME=$1
+  shift
+  LOGFILE=$1
+  shift
+  if [ $(($#%3)) -ne 0 ]; then
+    print_usage
+    exit 1
+  fi
+else
+  print_usage
+  exit 1
+fi
+
+
+echo "Time;Name;PIDS;CPU perc;Mem perc" > $LOGFILE
+
+if [ "$STARTTIME" -ne -1 ]; then
+    STARTTIME=$(($SECONDS-$STARTTIME))
+fi
+
+while [ true ]; do
+  docker stats --no-stream --format "table {{.Name}};{{.PIDs}};{{.CPUPerc}};{{.MemPerc}}" > tmp/.tmp_stat_out.txt
+  if [ "$STARTTIME" -eq -1 ]; then
+    STARTTIME=$SECONDS
+  fi
+  CTIME=$(($SECONDS-$STARTTIME))
+
+  TMP_APPS=""
+
+  while read -r line; do
+    APP_LIST=(${@})
+    if [ $STAT_TYPE == "DOCKER" ]; then
+      for ((i=0; i<$#; i=i+2)); do
+        SAPP=${APP_LIST[$i]}
+        APP=${APP_LIST[$i+1]}
+        d=$(echo $line | grep -v "k8s" | grep $APP)
+        if [ ! -z $d ]; then
+          d=$(echo $d | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+          echo "$SAPP;$CTIME;$d" >> $LOGFILE
+          TMP_APPS=$TMP_APPS" $SAPP "
+        fi
+      done
+    else
+      for ((i=0; i<$#; i=i+3)); do
+        SAPP=${APP_LIST[$i]}
+        APP=${APP_LIST[$i+1]}
+        NS=${APP_LIST[$i+2]}
+        d=$(echo "$line" | grep -v "k8s_POD" | grep "k8s" | grep $APP | grep $NS)
+        if [ ! -z "$d" ]; then
+          d=$(echo "$d" | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+          data="$SAPP-$NS;$CTIME;$d"
+          echo $data >> $LOGFILE
+          TMP_APPS=$TMP_APPS" $SAPP-$NS "
+        fi
+      done
+    fi
+  done < tmp/.tmp_stat_out.txt
+
+  APP_LIST=(${@})
+  if [ $STAT_TYPE == "DOCKER" ]; then
+    for ((i=0; i<$#; i=i+2)); do
+      SAPP=${APP_LIST[$i]}
+      APP=${APP_LIST[$i+1]}
+      if [[ $TMP_APPS != *" $SAPP "* ]]; then
+        data="$SAPP;$CTIME;0;0,00;0,00"
+        echo $data >> $LOGFILE
+      fi
+    done
+  else
+    for ((i=0; i<$#; i=i+3)); do
+      SAPP=${APP_LIST[$i]}
+      APP=${APP_LIST[$i+1]}
+      NS=${APP_LIST[$i+2]}
+      if [[ $TMP_APPS != *" $SAPP-$NS "* ]]; then
+        data="$SAPP-$NS;$CTIME;0;0,00;0,00"
+        echo $data >> $LOGFILE
+      fi
+    done
+  fi
+  sleep 1
+done
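Usage sketches for the new genstat.sh, taken from its print_usage; the app short-names, container/pod names and namespaces below are examples only. The script samples 'docker stats' once per second and appends to the log file until it is killed:

./genstat.sh DOCKER 0 ./tmp/stat_data.csv PA policy-agent CR callback-receiver &
./genstat.sh KUBE 0 ./tmp/stat_data.csv PA policymanagementservice nonrtric CR callback-receiver nonrtric &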
diff --git a/test/common/http_proxy_api_functions.sh b/test/common/http_proxy_api_functions.sh
index 56ce6d4..3378a1d 100644
--- a/test/common/http_proxy_api_functions.sh
+++ b/test/common/http_proxy_api_functions.sh
@@ -106,6 +106,18 @@
 	:
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__HTTPPROXY_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "HTTPPROXY $HTTP_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+	else
+		echo "HTTPPROXY $HTTP_PROXY_APP_NAME"
+	fi
+}
+
 #######################################################
 
 
diff --git a/test/common/kube_proxy_api_functions.sh b/test/common/kube_proxy_api_functions.sh
index dcaaf80..eb4600c 100644
--- a/test/common/kube_proxy_api_functions.sh
+++ b/test/common/kube_proxy_api_functions.sh
@@ -107,6 +107,18 @@
 	use_kube_proxy_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__KUBEPROXY_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "KUBEPROXXY $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+	else
+		echo "KUBEPROXXY $KUBE_PROXY_APP_NAME"
+	fi
+}
+
 #######################################################
 
 ## Access to Kube http proxy
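Each app's api-functions file now defines a __<APP>_statisics_setup hook that echoes "<short-name> <app-name>" (docker) or "<short-name> <app-name> <namespace>" (kube), or an empty string when the app should not be monitored. A sketch of how such hooks could be aggregated into arguments for genstat.sh; the aggregation loop itself is an assumption for illustration, not code from this change:

STAT_ARGS=""
for app in PA CR ECS; do                                 # hypothetical subset of app short-names
    STAT_ARGS="$STAT_ARGS $(__${app}_statisics_setup)"   # each hook echoes its stat descriptor
done
# $STAT_ARGS would then be passed to genstat.sh DOCKER/KUBE together with a start time and log file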
diff --git a/test/common/mr_api_functions.sh b/test/common/mr_api_functions.sh
index c6a5a2c..da3e34d 100755
--- a/test/common/mr_api_functions.sh
+++ b/test/common/mr_api_functions.sh
@@ -193,19 +193,84 @@
 	:  # handle by __MR_initial_setup
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__MR_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "MR $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE"
+	else
+		echo "MR $MR_STUB_APP_NAME"
+	fi
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPMR_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo ""
+	else
+		echo ""
+	fi
+}
 
 #######################################################
 
+# Description of port mappings when running MR-STUB only or MR-STUB + MESSAGE-ROUTER
+#
+# 'MR-STUB only' is started when only 'MR' is included in the test script. Both the test scripts and the apps will then use MR-STUB as a message-router simulator.
+#
+# 'MR-STUB + MESSAGE-ROUTER' is started when 'MR' and 'DMAAPMR' are included in the test scripts. DMAAPMR is the real message router, including kafka and zookeeper.
+# In this configuration, MR-STUB is used by the test script as a frontend to the message-router while the apps use the real message-router.
+#
+# DOCKER                                                                      KUBE
+# ---------------------------------------------------------------------------------------------------------------------------------------------------
+
+#                             MR-STUB                                                             MR-STUB
+#                             +++++++                                                             +++++++
+# localhost                               container                           service                                 pod
+# ==============================================================================================================================================
+# 10 MR_STUB_LOCALHOST_PORT          ->   13 MR_INTERNAL_PORT                 15 MR_EXTERNAL_PORT                ->   17 MR_INTERNAL_PORT
+# 12 MR_STUB_LOCALHOST_SECURE_PORT   ->   14 MR_INTERNAL_SECURE_PORT          16 MR_EXTERNAL_SECURE_PORT		 ->   18 MR_INTERNAL_SECURE_PORT
+
+
+
+#                             MESSAGE-ROUTER                                                      MESSAGE-ROUTER
+#                             ++++++++++++++                                                      ++++++++++++++
+# localhost                               container                           service                                 pod
+# ===================================================================================================================================================
+# 20 MR_DMAAP_LOCALHOST_PORT         ->   23 MR_INTERNAL_PORT                 25 MR_EXTERNAL_PORT                ->   27 MR_INTERNAL_PORT
+# 22 MR_DMAAP_LOCALHOST_SECURE_PORT  ->   24 MR_INTERNAL_SECURE_PORT          26 MR_EXTERNAL_SECURE_PORT   		 ->   28 MR_INTERNAL_SECURE_PORT
+
+
+# Running only the MR-STUB - apps using MR-STUB
+# DOCKER                                                                      KUBE
+# localhost:          10 and 12                                                -
+# via proxy (script): 13 and 14                                               via proxy (script): 15 and 16
+# apps:               13 and 14                                               apps:               15 and 16
+
+# Running MR-STUB (as frontend for test script) and MESSAGE-ROUTER - apps using MESSAGE-ROUTER
+# DOCKER                                                                      KUBE
+# localhost:          10 and 12                                                -
+# via proxy (script): 13 and 14                                               via proxy (script): 15 and 16
+# apps:               23 and 24                                               apps:               25 and 26
+#
+
+
+
 use_mr_http() {
-	__mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+	__mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
 }
 
 use_mr_https() {
-	__mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
+	__mr_set_protocoll "https" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
 }
 
 # Setup paths to svc/container for internal and external access
-# args: <protocol> <internal-port> <external-port> <mr-stub-internal-port> <mr-stub-external-port> <mr-stub-internal-secure-port> <mr-stub-external-secure-port>
+# args: <protocol> <internal-port> <external-port> <internal-secure-port> <external-secure-port>
 __mr_set_protocoll() {
 	echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
 	echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
@@ -214,39 +279,60 @@
 
 	MR_HTTPX=$1
 
+	if [ $MR_HTTPX == "http" ]; then
+		INT_PORT=$2
+		EXT_PORT=$3
+	else
+		INT_PORT=$4
+		EXT_PORT=$5
+	fi
+
 	# Access via test script
-	MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2  # access from script via proxy, docker
-	MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker
+	MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$INT_PORT  # access from script via proxy, docker
+	MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$INT_PORT # access from script via proxy, docker
+	MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proxy - set only if app is included
 
 	MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker -  access pod->svc, kube
+	MR_KAFKA_SERVICE_PATH=""
 	__check_included_image "DMAAPMR"
 	if [ $? -eq 0 ]; then
 		MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker -  access pod->svc, kube
+		MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+
+		MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME":"$MR_KAFKA_PORT
 	fi
 
 	# For directing calls from script to e.g.PMS via message rounter
-	# Theses case shall always go though the  mr-stub
-	MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4
-	MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6
+	# These cases shall always go through the mr-stub
+	MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$2
+	MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$4
+
+	MR_DMAAP_ADAPTER_TYPE="REST"
+
+
 
 	if [ $RUNMODE == "KUBE" ]; then
-		MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
-		MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+		MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube
+		MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube
 
 		MR_SERVICE_PATH=$MR_STUB_PATH
 		__check_included_image "DMAAPMR"
 		if [ $? -eq 0 ]; then
 			MR_SERVICE_PATH=$MR_DMAAP_PATH
+			MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+			MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT
 		fi
 		__check_prestarted_image "DMAAPMR"
 		if [ $? -eq 0 ]; then
 			MR_SERVICE_PATH=$MR_DMAAP_PATH
+			MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+			MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT
 		fi
 
 		# For directing calls from script to e.g.PMS, via message rounter
 		# These calls shall always go though the  mr-stub
-		MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5
-		MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7
+		MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3
+		MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$5
 	fi
 
 	# For calls from script to the mr-stub
@@ -254,8 +340,77 @@
 	MR_STUB_ADAPTER_TYPE="REST"
 
 	echo ""
+
 }
 
+
+# use_mr_http() {                2                3                  4                5                  6                       7
+# 	__mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+# }
+
+# use_mr_https() {
+# 	__mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
+# }
+
+# # Setup paths to svc/container for internal and external access
+# # args: <protocol> <internal-port> <external-port> <mr-stub-internal-port> <mr-stub-external-port> <mr-stub-internal-secure-port> <mr-stub-external-secure-port>
+# __mr_set_protocoll() {
+# 	echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
+# 	echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
+
+# 	## Access to Dmaap mediator
+
+# 	MR_HTTPX=$1
+
+# 	# Access via test script
+# 	MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2  # access from script via proxy, docker
+# 	MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker
+# 	MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proyx - set only if app is included
+
+# 	MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker -  access pod->svc, kube
+# 	__check_included_image "DMAAPMR"
+# 	if [ $? -eq 0 ]; then
+# 		MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker -  access pod->svc, kube
+# 		MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# 	fi
+
+# 	# For directing calls from script to e.g.PMS via message rounter
+# 	# These cases shall always go though the  mr-stub
+# 	MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4
+# 	MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6
+
+# 	MR_DMAAP_ADAPTER_TYPE="REST"
+
+# 	if [ $RUNMODE == "KUBE" ]; then
+# 		MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+# 		MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+
+# 		MR_SERVICE_PATH=$MR_STUB_PATH
+# 		__check_included_image "DMAAPMR"
+# 		if [ $? -eq 0 ]; then
+# 			MR_SERVICE_PATH=$MR_DMAAP_PATH
+# 			MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# 		fi
+# 		__check_prestarted_image "DMAAPMR"
+# 		if [ $? -eq 0 ]; then
+# 			MR_SERVICE_PATH=$MR_DMAAP_PATH
+# 			MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# 		fi
+
+# 		# For directing calls from script to e.g.PMS, via message rounter
+# 		# These calls shall always go though the  mr-stub
+# 		MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5
+# 		MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7
+# 	fi
+
+# 	# For calls from script to the mr-stub
+# 	MR_STUB_ADAPTER=$MR_STUB_PATH
+# 	MR_STUB_ADAPTER_TYPE="REST"
+
+# 	echo ""
+
+# }
+
 # Export env vars for config files, docker compose and kube resources
 # args: -
 __dmaapmr_export_vars() {
@@ -272,6 +427,14 @@
 	export MR_DMAAP_LOCALHOST_SECURE_PORT
 	export MR_INTERNAL_SECURE_PORT
 	export MR_DMAAP_HOST_MNT_DIR
+
+	export KUBE_ONAP_NAMESPACE
+	export MR_EXTERNAL_PORT
+	export MR_EXTERNAL_SECURE_PORT
+	export MR_KAFKA_PORT
+	export MR_ZOOKEEPER_PORT
+
+	export MR_KAFKA_SERVICE_PATH
 }
 
 # Export env vars for config files, docker compose and kube resources
@@ -283,10 +446,17 @@
 	export MRSTUB_IMAGE
 	export MR_INTERNAL_PORT
 	export MR_INTERNAL_SECURE_PORT
+	export MR_EXTERNAL_PORT
+	export MR_EXTERNAL_SECURE_PORT
 	export MR_STUB_LOCALHOST_PORT
 	export MR_STUB_LOCALHOST_SECURE_PORT
 	export MR_STUB_CERT_MOUNT_DIR
 	export MR_STUB_DISPLAY_NAME
+
+	export KUBE_ONAP_NAMESPACE
+	export MR_EXTERNAL_PORT
+
+	export MR_KAFKA_SERVICE_PATH
 }
 
 
@@ -358,53 +528,33 @@
 
 			__dmaapmr_export_vars
 
-			#export MR_DMAAP_APP_NAME
-			export MR_DMAAP_KUBE_APP_NAME=message-router
-			MR_DMAAP_APP_NAME=$MR_DMAAP_KUBE_APP_NAME
-			export KUBE_ONAP_NAMESPACE
-			export MR_EXTERNAL_PORT
-			export MR_INTERNAL_PORT
-			export MR_EXTERNAL_SECURE_PORT
-			export MR_INTERNAL_SECURE_PORT
-			export ONAP_DMAAPMR_IMAGE
-
-			export MR_KAFKA_BWDS_NAME=akfak-bwds
-			export MR_KAFKA_BWDS_NAME=kaka
-			export KUBE_ONAP_NAMESPACE
-
-			export MR_ZOOKEEPER_APP_NAME
-			export ONAP_ZOOKEEPER_IMAGE
-
 			#Check if onap namespace exists, if not create it
 			__kube_create_namespace $KUBE_ONAP_NAMESPACE
 
-			# TODO - Fix domain name substitution in the prop file
-			# Create config maps - dmaapmr app
-			configfile=$PWD/tmp/MsgRtrApi.properties
-			cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/KUBE-MsgRtrApi.properties $configfile
+			# copy config files
+			MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR
+			cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/*  $MR_MNT_CONFIG_BASEPATH
 
+			# Create config maps - dmaapmr app
+			configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties
 			output_yaml=$PWD/tmp/dmaapmr_msgrtrapi_cfc.yaml
 			__kube_create_configmap dmaapmr-msgrtrapi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
-			configfile=$PWD/tmp/logback.xml
-			cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/logback.xml $configfile
+			configfile=$MR_MNT_CONFIG_BASEPATH/mr/logback.xml
 			output_yaml=$PWD/tmp/dmaapmr_logback_cfc.yaml
 			__kube_create_configmap dmaapmr-logback.xml $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
-			configfile=$PWD/tmp/cadi.properties
-			cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/cadi.properties $configfile
+			configfile=$MR_MNT_CONFIG_BASEPATH/mr/cadi.properties
 			output_yaml=$PWD/tmp/dmaapmr_cadi_cfc.yaml
 			__kube_create_configmap dmaapmr-cadi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
 			# Create config maps - kafka app
-			configfile=$PWD/tmp/zk_client_jaas.conf
-			cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/kafka/zk_client_jaas.conf $configfile
+			configfile=$MR_MNT_CONFIG_BASEPATH/kafka/zk_client_jaas.conf
 			output_yaml=$PWD/tmp/dmaapmr_zk_client_cfc.yaml
 			__kube_create_configmap dmaapmr-zk-client-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
 			# Create config maps - zookeeper app
-			configfile=$PWD/tmp/zk_server_jaas.conf
-			cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/zk/zk_server_jaas.conf $configfile
+			configfile=$MR_MNT_CONFIG_BASEPATH/zk/zk_server_jaas.conf
 			output_yaml=$PWD/tmp/dmaapmr_zk_server_cfc.yaml
 			__kube_create_configmap dmaapmr-zk-server-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
@@ -419,42 +569,69 @@
 			__kube_create_instance app $MR_DMAAP_APP_NAME $input_yaml $output_yaml
 
 
-			echo " Retrieving host and ports for service..."
-			MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE)
+			# echo " Retrieving host and ports for service..."
+			# MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE)
 
-			MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http")
-			MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https")
+			# MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http")
+			# MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https")
 
-			echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
-			MR_SERVICE_PATH=""
-			if [ $MR_HTTPX == "http" ]; then
-				MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT
-				MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
-			else
-				MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT
-				MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+			# echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
+			# MR_SERVICE_PATH=""
+			# if [ $MR_HTTPX == "http" ]; then
+			# 	MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT
+			# 	MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
+			# else
+			# 	MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT
+			# 	MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+			# fi
+
+			__check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
+
+			# Topic creation via the create API keeps returning 400 here - topics are instead created implicitly by the pipeclean calls below
+			#__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+
+			#__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+
+#			__dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
+#
+#			__dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+
+
+			#__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100"
+			#__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100"
+
+			if [ $# -gt 0 ]; then
+				if [ $(($#%3)) -eq 0 ]; then
+					while [ $# -gt 0 ]; do
+						__dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100"
+						shift; shift; shift;
+					done
+				else
+					echo -e $RED" args: start_mr [<topic-name> <base-url> <group-and-user-url>]*"$ERED
+					echo -e $RED" Got: $@"$ERED
+					exit 1
+				fi
 			fi
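+			# Illustrative expansion of the argument loop above (topic and group names
+			# are hypothetical, not taken from any test script): for the triplet
+			#   "unauthenticated.mytopic.json" "/events" "users/mr-stub"
+			# one iteration performs:
+			#   __dmaap_pipeclean "unauthenticated.mytopic.json" "/events/unauthenticated.mytopic.json" "/events/unauthenticated.mytopic.json/users/mr-stub?timeout=1000&limit=100"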
 
-				__check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
+			echo " Current topics:"
+			curlString="$MR_DMAAP_PATH/topics"
+			result=$(__do_curl "$curlString")
+			echo $result | indent2
 
 		fi
 
 		if [ $retcode_included_mr -eq 0 ]; then
-			#exporting needed var for deployment
-			export MR_STUB_APP_NAME
-			export KUBE_ONAP_NAMESPACE
-			export MRSTUB_IMAGE
-			export MR_INTERNAL_PORT
-			export MR_INTERNAL_SECURE_PORT
-			export MR_EXTERNAL_PORT
-			export MR_EXTERNAL_SECURE_PORT
+
+			__mr_export_vars
 
 			if [ $retcode_prestarted_dmaapmr -eq 0 ] || [ $retcode_included_dmaapmr -eq 0 ]; then  # Set topics for dmaap
 				export TOPIC_READ="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC"
 				export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
+				export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT"
 			else
 				export TOPIC_READ=""
 				export TOPIC_WRITE=""
+				export GENERIC_TOPICS_UPLOAD_BASEURL=""
 			fi
 
 			#Check if onap namespace exists, if not create it
@@ -473,30 +650,29 @@
 
 		fi
 
+		# echo " Retrieving host and ports for service..."
+		# MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE)
 
-		echo " Retrieving host and ports for service..."
-		MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE)
+		# MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http")
+		# MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https")
 
-		MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http")
-		MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https")
+		# echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
+		# if [ $MR_HTTPX == "http" ]; then
+		# 	MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
+		# 	if [ -z "$MR_SERVICE_PATH" ]; then
+		# 		MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
+		# 	fi
+		# else
+		# 	MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
+		# 	if [ -z "$MR_SERVICE_PATH" ]; then
+		# 		MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+		# 	fi
+		# fi
+		# MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
+		# MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
 
-		echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
-		if [ $MR_HTTPX == "http" ]; then
-			MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
-			if [ -z "$MR_SERVICE_PATH" ]; then
-				MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
-			fi
-		else
-			MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
-			if [ -z "$MR_SERVICE_PATH" ]; then
-				MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
-			fi
-		fi
-		MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
-		MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
-
-		MR_STUB_ADAPTER=$MR_STUB_PATH
-		MR_STUB_ADAPTER_TYPE="REST"
+		# MR_STUB_ADAPTER=$MR_STUB_PATH
+		# MR_STUB_ADAPTER_TYPE="REST"
 
 		__check_service_start $MR_STUB_APP_NAME $MR_STUB_PATH$MR_STUB_ALIVE_URL
 
@@ -532,26 +708,55 @@
 
 		export TOPIC_READ=""
         export TOPIC_WRITE=""
+		export GENERIC_TOPICS_UPLOAD_BASEURL=""
 		if [ $retcode_dmaapmr -eq 0 ]; then  # Set topics for dmaap
 			export TOPIC_READ="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC"
 			export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
+			export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT"
 		fi
 
 		__dmaapmr_export_vars
 
 		if [ $retcode_dmaapmr -eq 0 ]; then
+
+			# copy config files
+			MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR
+			cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/*  $MR_MNT_CONFIG_BASEPATH
+
+			# substitute vars
+			configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties
+			cp $configfile $configfile"_tmp"
+			envsubst < $configfile"_tmp" > $configfile
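+			# Sketch of what the envsubst step does, assuming MsgRtrApi.properties contains a
+			# placeholder line such as (the property key is illustrative, not taken from the real file):
+			#   config.zk.servers=${MR_ZOOKEEPER_APP_NAME}:${MR_ZOOKEEPER_PORT}
+			# which, with the exported vars, is rewritten to:
+			#   config.zk.servers=zookeeper:2181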
+
 			__start_container $MR_DMAAP_COMPOSE_DIR "" NODOCKERARGS 1 $MR_DMAAP_APP_NAME
 
 			__check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
 
 
-			__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+			# Topic creation via the create API keeps returning 400 here - topics are instead created implicitly by the pipeclean calls below
+			#__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
 
-			__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+			#__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
 
-			__dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
+			#__dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
 
-			__dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+			#__dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+
+			if [ $# -gt 0 ]; then
+				if [ $(($#%3)) -eq 0 ]; then
+					while [ $# -gt 0 ]; do
+						__dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100"
+						shift; shift; shift;
+					done
+				else
+					echo -e $RED" args: start_mr [<topic-name> <base-url> <group-and-user-url>]*"$ERED
+					echo -e $RED" Got: $@"$ERED
+					exit 1
+				fi
+			fi
+
+			#__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100"
+			#__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100"
 
 			echo " Current topics:"
 			curlString="$MR_DMAAP_PATH/topics"
@@ -575,23 +780,25 @@
 # Create a dmaap mr topic
 # args: <topic name> <topic-description>
 __create_topic() {
-	echo -ne " Creating read topic: $1"$SAMELINE
+	echo -ne " Creating topic: $1"$SAMELINE
 
 	json_topic="{\"topicName\":\"$1\",\"partitionCount\":\"2\", \"replicationCount\":\"3\", \"transactionEnabled\":\"false\",\"topicDescription\":\"$2\"}"
-	echo $json_topic > ./tmp/$1.json
+	fname="./tmp/$1.json"
+	echo $json_topic > $fname
 
-	curlString="$MR_DMAAP_PATH/topics/create -X POST  -H Content-Type:application/json -d@./tmp/$1.json"
-	topic_retries=5
+	query="/topics/create"
+	topic_retries=10
 	while [ $topic_retries -gt 0 ]; do
 		let topic_retries=topic_retries-1
-		result=$(__do_curl "$curlString")
-		if [ $? -eq 0 ]; then
+		res="$(__do_curl_to_api DMAAPMR POST $query $fname)"
+		status=${res:${#res}-3}
+
+		if [[ $status == "2"* ]]; then
 			topic_retries=0
-			echo -e " Creating read topic: $1 $GREEN OK $EGREEN"
-		fi
-		if [ $? -ne 0 ]; then
+			echo -e " Creating topic: $1 $GREEN OK $EGREEN"
+		else
 			if [ $topic_retries -eq 0 ]; then
-				echo -e " Creating read topic: $1 $RED Failed $ERED"
+				echo -e " Creating topic: $1 $RED Failed $ERED"
 				((RES_CONF_FAIL++))
 				return 1
 			else
@@ -599,18 +806,27 @@
 			fi
 		fi
 	done
+	echo
 	return 0
 }
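+# Illustrative usage of __create_topic (the topic name is hypothetical). Note that the
+# calls to this helper in start_mr are currently commented out - topics are instead
+# created implicitly by the pipeclean calls:
+#   __create_topic "unauthenticated.mytopic.json" "Topic used for test messages"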
 
 # Do a pipeclean of a topic - to overcome dmaap mr bug...
-# args: <topic> <post-url> <read-url>
+# args: <topic> <post-url> <read-url> [<num-retries>]
 __dmaap_pipeclean() {
 	pipeclean_retries=50
+	if [ $# -eq 4 ]; then
+		pipeclean_retries=$4
+	fi
 	echo -ne " Doing dmaap-mr pipe cleaning on topic: $1"$SAMELINE
 	while [ $pipeclean_retries -gt 0 ]; do
-		echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/pipeclean.json
+		if [[ $1 == *".text" ]]; then
+			echo "pipeclean-$1:$pipeclean_retries" > ./tmp/__dmaap_pipeclean.txt
+			curlString="$MR_DMAAP_PATH$2 -X POST  -H Content-Type:text/plain -d@./tmp/__dmaap_pipeclean.txt"
+		else
+			echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/__dmaap_pipeclean.json
+			curlString="$MR_DMAAP_PATH$2 -X POST  -H Content-Type:application/json -d@./tmp/__dmaap_pipeclean.json"
+		fi
 		let pipeclean_retries=pipeclean_retries-1
-		curlString="$MR_DMAAP_PATH$2 -X POST  -H Content-Type:application/json -d@./tmp/pipeclean.json"
 		result=$(__do_curl "$curlString")
 		if [ $? -ne 0 ]; then
 			sleep 1
@@ -688,7 +904,7 @@
 # arg: <topic-url> <json-msg>
 # (Function for test scripts)
 mr_api_send_json() {
-	__log_test_start $@
+	__log_conf_start $@
     if [ $# -ne 2 ]; then
         __print_err "<topic-url> <json-msg>" $@
         return 1
@@ -700,10 +916,139 @@
 
 	status=${res:${#res}-3}
 	if [ $status -ne 200 ]; then
-		__log_test_fail_status_code 200 $status
+		__log_conf_fail_status_code 200 $status
 		return 1
 	fi
 
-	__log_test_pass
+	__log_conf_ok
+	return 0
+}
+
+# Send text to topic in mr-stub.
+# arg: <topic-url> <text-msg>
+# (Function for test scripts)
+mr_api_send_text() {
+	__log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<topic-url> <text-msg>" $@
+        return 1
+    fi
+	query=$1
+	fname=$PWD/tmp/text_payload_to_mr.txt
+	echo $2 > $fname
+	res="$(__do_curl_to_api MRSTUB POST $query $fname text/plain)"
+
+	status=${res:${#res}-3}
+	if [ $status -ne 200 ]; then
+		__log_conf_fail_status_code 200 $status
+		return 1
+	fi
+
+	__log_conf_ok
+	return 0
+}
+
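+# Illustrative usage of the two send helpers above (topic paths and payloads are
+# hypothetical, not taken from any test script):
+#   mr_api_send_json "/events/unauthenticated.mytopic.json" '{"msg":"hello"}'
+#   mr_api_send_text "/events/unauthenticated.mytopic.text" 'hello'
+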
+# Send json file to topic in mr-stub.
+# arg: <topic-url> <json-file>
+# (Function for test scripts)
+mr_api_send_json_file() {
+	__log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<topic-url> <json-file>" $@
+        return 1
+    fi
+	query=$1
+	if [ ! -f $2 ]; then
+		__log_conf_fail_general "File $2 does not exist"
+		return 1
+	fi
+	#Create json array for mr
+	datafile="tmp/mr_api_send_json_file.json"
+	{ echo -n "[" ; cat $2 ; echo -n "]" ;} > $datafile
+
+	res="$(__do_curl_to_api MRSTUB POST $query $datafile)"
+
+	status=${res:${#res}-3}
+	if [ $status -ne 200 ]; then
+		__log_conf_fail_status_code 200 $status
+		return 1
+	fi
+
+	__log_conf_ok
+	return 0
+}
+
+# Send text file to topic in mr-stub.
+# arg: <topic-url> <text-file>
+# (Function for test scripts)
+mr_api_send_text_file() {
+	__log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<topic-url> <text-file>" $@
+        return 1
+    fi
+	query=$1
+	if [ ! -f $2 ]; then
+		__log_conf_fail_general "File $2 does not exist"
+		return 1
+	fi
+
+	res="$(__do_curl_to_api MRSTUB POST $query $2 text/plain)"
+
+	status=${res:${#res}-3}
+	if [ $status -ne 200 ]; then
+		__log_conf_fail_status_code 200 $status
+		return 1
+	fi
+
+	__log_conf_ok
+	return 0
+}
+
+# Create json file for payload
+# arg: <size-in-kb> <filename>
+mr_api_generate_json_payload_file() {
+	__log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<size-in-kb> <filename>" $@
+        return 1
+    fi
+	if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+		__log_conf_fail_general "Only size between 1k and 10000k supported"
+		return 1
+	fi
+	echo -n "{\"a\":[" > $2
+	LEN=$(($1*150))
+	echo -n "\"a0\"" >> $2
+	for ((idx=1; idx<$LEN; idx++))
+	do
+		echo -n ",\"a$idx\"" >> $2
+	done
+	echo -n "]}" >> $2
+
+	__log_conf_ok
+	return 0
+}
+
+# Create text file for payload
+# arg: <size-in-kb> <filename>
+mr_api_generate_text_payload_file() {
+	__log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<size-in-kb> <filename>" $@
+        return 1
+    fi
+	if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+		__log_conf_fail_general "Only size between 1k and 10000k supported"
+		return 1
+	fi
+	echo -n "" > $2
+	LEN=$(($1*100))
+	for ((idx=0; idx<$LEN; idx++))
+	do
+		echo -n "ABCDEFGHIJ" >> $2
+	done
+
+	__log_conf_ok
 	return 0
 }
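+
+# Illustrative combined usage of the payload helpers above (file names and topic paths
+# are hypothetical): generate ~100kb json and text payloads, then push them via the mr-stub:
+#   mr_api_generate_json_payload_file 100 ./tmp/payload_100kb.json
+#   mr_api_send_json_file "/events/unauthenticated.mytopic.json" ./tmp/payload_100kb.json
+#   mr_api_generate_text_payload_file 100 ./tmp/payload_100kb.txt
+#   mr_api_send_text_file "/events/unauthenticated.mytopic.text" ./tmp/payload_100kb.txt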
diff --git a/test/common/prodstub_api_functions.sh b/test/common/prodstub_api_functions.sh
index bb4ccf5..6c3ce23 100644
--- a/test/common/prodstub_api_functions.sh
+++ b/test/common/prodstub_api_functions.sh
@@ -107,6 +107,18 @@
 	use_prod_stub_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PRODSTUB_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "PRODSTUB $PROD_STUB_APP_NAME $KUBE_SIM_NAMESPACE"
+	else
+		echo "PRODSTUB $PROD_STUB_APP_NAME"
+	fi
+}
+
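+# The echoed values feed the runtime statistics collection (genstat.sh) started by
+# testcase_common.sh - e.g. output of the form (the app name value is an assumption):
+#   "PRODSTUB producer-stub nonrtric-ft"   in kube mode
+#   "PRODSTUB producer-stub"               in docker mode
+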
 #######################################################
 
 # Set http as the protocol to use for all communication to the Prod stub sim
diff --git a/test/common/pvccleaner_api_functions.sh b/test/common/pvccleaner_api_functions.sh
index 62c2d43..5d37bd0 100644
--- a/test/common/pvccleaner_api_functions.sh
+++ b/test/common/pvccleaner_api_functions.sh
@@ -90,6 +90,14 @@
 	:
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PVCCLEANER_statisics_setup() {
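+	# An empty echo means this app is not included in the runtime statistics collection
+	# (the empty string contributes nothing when the stat arguments are assembled)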
+	echo ""
+}
+
 #######################################################
 
 # This is a system app, all usage in testcase_common.sh
\ No newline at end of file
diff --git a/test/common/rapp_catalogue_api_functions.sh b/test/common/rapp_catalogue_api_functions.sh
index 52416d3..537bc0c 100644
--- a/test/common/rapp_catalogue_api_functions.sh
+++ b/test/common/rapp_catalogue_api_functions.sh
@@ -84,6 +84,18 @@
 	use_rapp_catalogue_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__RC_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "RC $RAPP_CAT_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "RC $RAPP_CAT_APP_NAME"
+	fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Rapp catalogue
diff --git a/test/common/ricsimulator_api_functions.sh b/test/common/ricsimulator_api_functions.sh
index f760313..695b535 100644
--- a/test/common/ricsimulator_api_functions.sh
+++ b/test/common/ricsimulator_api_functions.sh
@@ -91,6 +91,18 @@
 	use_simulator_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__RICSIM_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo ""
+	else
+		echo ""
+	fi
+}
+
 #######################################################
 
 
diff --git a/test/common/test_env-onap-guilin.sh b/test/common/test_env-onap-guilin.sh
index 8344f38..6cb18f5 100755
--- a/test/common/test_env-onap-guilin.sh
+++ b/test/common/test_env-onap-guilin.sh
@@ -161,9 +161,9 @@
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -189,7 +189,7 @@
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
 POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -210,10 +210,12 @@
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -222,6 +224,8 @@
 CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (data containing text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -310,6 +314,12 @@
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
diff --git a/test/common/test_env-onap-honolulu.sh b/test/common/test_env-onap-honolulu.sh
index 00e5d4b..c293420 100755
--- a/test/common/test_env-onap-honolulu.sh
+++ b/test/common/test_env-onap-honolulu.sh
@@ -185,9 +185,9 @@
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -233,7 +233,7 @@
 ECS_VERSION="V1-2"                                       # Version where the types are added in the producer registration
 ECS_FEATURE_LEVEL=""                                     # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -254,9 +254,12 @@
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -266,6 +269,8 @@
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (data containing text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -378,6 +383,12 @@
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
diff --git a/test/common/test_env-onap-istanbul.sh b/test/common/test_env-onap-istanbul.sh
index f8c411f..5b11137 100644
--- a/test/common/test_env-onap-istanbul.sh
+++ b/test/common/test_env-onap-istanbul.sh
@@ -69,10 +69,10 @@
 
 # Policy Agent image and tags
 POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice"
-POLICY_AGENT_IMAGE_TAG_LOCAL="1.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE="1.3.0-STAGING-latest" #Will use snapshot repo
-POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.3.0"
+POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.4-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.4-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.4-STAGING-latest" #Will use snapshot repo
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.3"
 
 # SDNC A1 Controller remote image and tag
 SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
@@ -146,17 +146,17 @@
 
 #ONAP Zookeeper remote image and tag
 ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper"
-ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.0.3"
+ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.1.0"
 #No local image for ONAP Zookeeper, remote image always used
 
 #ONAP Kafka remote image and tag
 ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111"
-ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.0.4"
+ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.1"
 #No local image for ONAP Kafka, remote image always used
 
 #ONAP DMAAP-MR remote image and tag
 ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr"
-ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
+ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.3.0"
 #No local image for ONAP DMAAP-MR, remote image always used
 
 #Kube proxy remote image and tag
@@ -188,9 +188,9 @@
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -236,7 +236,7 @@
 ECS_VERSION="V1-2"                                       # Version where the types are added in the producer registration
 ECS_FEATURE_LEVEL="INFO-TYPES"                           # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -257,9 +257,12 @@
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt2"                             # Config files dir on localhost
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -269,6 +272,8 @@
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (data containing text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -397,6 +402,12 @@
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
diff --git a/test/common/test_env-oran-cherry.sh b/test/common/test_env-oran-cherry.sh
index 43077ea..641aabe 100755
--- a/test/common/test_env-oran-cherry.sh
+++ b/test/common/test_env-oran-cherry.sh
@@ -188,9 +188,9 @@
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -236,7 +236,7 @@
 ECS_VERSION="V1-2"                                       # Version where the types are added in the producer registration
 ECS_FEATURE_LEVEL=""                                     # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -257,10 +257,12 @@
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -269,6 +271,8 @@
 CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (data containing text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -378,6 +382,12 @@
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
diff --git a/test/common/test_env-oran-d-release.sh b/test/common/test_env-oran-d-release.sh
index cc510d5..18f7e17 100755
--- a/test/common/test_env-oran-d-release.sh
+++ b/test/common/test_env-oran-d-release.sh
@@ -207,9 +207,9 @@
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -255,7 +255,7 @@
 ECS_VERSION="V1-2"                                       # Version where the types are decoupled from the producer registration
 ECS_FEATURE_LEVEL="INFO-TYPES"                           # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -276,10 +276,12 @@
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback receiver"
@@ -288,6 +290,8 @@
 CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (data containing text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -441,6 +445,12 @@
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
diff --git a/test/common/test_env-oran-e-release.sh b/test/common/test_env-oran-e-release.sh
index e2b53da..546e94c 100755
--- a/test/common/test_env-oran-e-release.sh
+++ b/test/common/test_env-oran-e-release.sh
@@ -235,7 +235,7 @@
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
 KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -281,7 +281,7 @@
 ECS_VERSION="V1-2"                                       # Version where the types are decoupled from the producer registration
 ECS_FEATURE_LEVEL="INFO-TYPES TYPE-SUBSCRIPTIONS INFO-TYPE-INFO"  # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -302,10 +302,12 @@
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt2"                            # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback receiver"
@@ -315,6 +317,7 @@
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
 CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (data containing text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -478,6 +481,10 @@
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
 
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
 DMAAP_ADP_APP_NAME="dmaapadapterservice"                 # Name for Dmaap Adapter container
 DMAAP_ADP_DISPLAY_NAME="Dmaap Adapter Service"           # Display name for Dmaap Adapter container
 DMAAP_ADP_EXTERNAL_PORT=9087                             # Dmaap Adapter container external port (host -> container)
@@ -511,18 +518,13 @@
 #DMAAP_MED_CERT_MOUNT_DIR="./cert"
 DMAAP_MED_ALIVE_URL="/status"                            # Base path for alive check
 DMAAP_MED_COMPOSE_DIR="dmaapmed"                         # Dir in simulator_group for docker-compose
-#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration
-DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file
-DMAAP_MED_DATA_FILE="type_config.json"  # Container data file name
-#DMAAP_MED_CONFIG_FILE=application.yaml                   # Config file name
-
-PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
-PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
-PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+#MAAP_MED_CONFIG_MOUNT_PATH="/app"                       # Internal container path for configuration
+DMAAP_MED_DATA_MOUNT_PATH="/configs"                     # Path in container for data file
+DMAAP_MED_DATA_FILE="type_config.json"                   # Container data file name
 
 ########################################
 # Setting for common curl-base function
 ########################################
 
-UUID=""                                                   # UUID used as prefix to the policy id to simulate a real UUID
-                                                          # Testscript need to set the UUID otherwise this empty prefix is used
+UUID=""                                                  # UUID used as prefix to the policy id to simulate a real UUID
+                                                         # Testscript needs to set the UUID, otherwise this empty prefix is used
diff --git a/test/common/testcase_common.sh b/test/common/testcase_common.sh
index 8d832d7..78eeb54 100755
--- a/test/common/testcase_common.sh
+++ b/test/common/testcase_common.sh
@@ -28,7 +28,7 @@
 	echo "      [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+]  [--use-snapshot-image <app-nam>+]"
 	echo "      [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
 	echo "      [--repo-policy local|remote] [--cluster-timeout <timeout-in seconds>] [--print-stats]"
-	echo "      [--override <override-environment-filename> --pre-clean]"
+	echo "      [--override <override-environment-filename> --pre-clean --gen-stats]"
 }
 
 if [ $# -eq 1 ] && [ "$1" == "help" ]; then
@@ -59,6 +59,7 @@
 	echo "--print-stats         -  Print current test stats after each test."
 	echo "--override <file>     -  Override setting from the file supplied by --env-file"
 	echo "--pre-clean           -  Will clean kube resouces when running docker and vice versa"
+	echo "--gen-stats           -  Collect container/pod runtime statistics"
 
 	echo ""
 	echo "List of app short names supported: "$APP_SHORT_NAMES
@@ -207,6 +208,9 @@
 #Var to control if current stats shall be printed
 PRINT_CURRENT_STATS=0
 
+#Var to control if container/pod runtime statistics shall be collected
+COLLECT_RUNTIME_STATS=0
+
 #File to keep deviation messages
 DEVIATION_FILE=".tmp_deviations"
 rm $DEVIATION_FILE &> /dev/null
@@ -222,6 +226,9 @@
 }
 trap trap_fnc ERR
 
+# Trap to kill subprocesses
+trap "kill 0" EXIT
+
 # Counter for tests
 TEST_SEQUENCE_NR=1
 
@@ -652,6 +659,15 @@
 			foundparm=0
 		fi
 	fi
+	if [ $paramerror -eq 0 ]; then
+		if [ "$1" == "--gen-stats" ]; then
+			COLLECT_RUNTIME_STATS=1
+			echo "Option set - Collect runtime statistics"
+			shift;
+			foundparm=0
+		fi
+	fi
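+	# Sketch of enabling stats collection when invoking a test (the script name and the
+	# other options are placeholders for whatever the test run normally uses):
+	#   ./FTC1.sh <normal-test-options> --gen-stats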
+
 done
 echo ""
 
@@ -768,7 +784,7 @@
 	fi
 fi
 if [ $RUNMODE == "DOCKER" ]; then
-	tmp=$(docker-compose version | grep -i 'Docker Compose version')
+	tmp=$(docker-compose version | grep -i 'docker' | grep -i 'compose' | grep -i 'version')
 	if [[ "$tmp" == *'v2'* ]]; then
 		echo -e $RED"docker-compose is using docker-compose version 2"$ERED
 		echo -e $RED"The test environment only support version 1"$ERED
@@ -1449,6 +1465,8 @@
 	echo -e $BOLD"======================================================="$EBOLD
 	echo ""
 
+	LOG_STAT_ARGS=""
+
 	for imagename in $APP_SHORT_NAMES; do
 		__check_included_image $imagename
 		retcode_i=$?
@@ -1464,9 +1482,16 @@
 
 			function_pointer="__"$imagename"_initial_setup"
 			$function_pointer
+
+			function_pointer="__"$imagename"_statisics_setup"
+			LOG_STAT_ARGS=$LOG_STAT_ARGS" "$($function_pointer)
 		fi
 	done
 
+	if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
+		../common/genstat.sh $RUNMODE $SECONDS $TESTLOGS/$ATC/stat_data.csv $LOG_STAT_ARGS &
+	fi
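+	# Illustrative shape of the resulting background call (app names and the elapsed
+	# seconds are placeholders; the namespaces follow the per-app statistics setup functions):
+	#   ../common/genstat.sh KUBE 42 <testlog-dir>/stat_data.csv PRODSTUB <prodstub-app> nonrtric-ft RC <rapp-cat-app> nonrtric ...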
+
 }
 
 # Function to print the test result, shall be the last cmd in a test script
@@ -1498,8 +1523,16 @@
 	echo "Timer measurement in the test script"
 	echo "===================================="
 	column -t -s $'\t' $TIMER_MEASUREMENTS
+	if [ $RES_PASS != $RES_TEST ]; then
+		echo -e $RED"Measurements may not be reliable when there are failed tests - script timeouts may cause long measurement values"$ERED
+	fi
 	echo ""
 
+	if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
+		echo "Runtime statistics collected in file: "$TESTLOGS/$ATC/stat_data.csv
+		echo ""
+	fi
+
 	total=$((RES_PASS+RES_FAIL))
 	if [ $RES_TEST -eq 0 ]; then
 		echo -e "\033[1mNo tests seem to have been executed. Check the script....\033[0m"
@@ -2142,41 +2175,6 @@
 	return 0
 }
 
-# Function to create a configmap in kubernetes
-# args: <configmap-name> <namespace> <labelname> <labelid> <path-to-data-file> <path-to-output-yaml>
-# (Not for test scripts)
-__kube_create_configmapXXXXXXXXXXXXX() {
-	echo -ne " Creating configmap $1 "$SAMELINE
-	#envsubst < $5 > $5"_tmp"
-	#cp $5"_tmp" $5  #Need to copy back to orig file name since create configmap neeed the original file name
-	kubectl create configmap $1  -n $2 --from-file=$5 --dry-run=client -o yaml > $6
-	if [ $? -ne 0 ]; then
-		echo -e " Creating configmap $1 $RED Failed $ERED"
-		((RES_CONF_FAIL++))
-		return 1
-	fi
-
-	kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr
-	if [ $? -ne 0 ]; then
-		echo -e " Creating configmap $1 $RED Apply failed $ERED"
-		echo "  Message: $(<./tmp/kubeerr)"
-		((RES_CONF_FAIL++))
-		return 1
-	fi
-	kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
-	if [ $? -ne 0 ]; then
-		echo -e " Creating configmap $1 $RED Labeling failed $ERED"
-		echo "  Message: $(<./tmp/kubeerr)"
-		((RES_CONF_FAIL++))
-		return 1
-	fi
-	# Log the resulting map
-	kubectl get configmap $1 -n $2 -o yaml > $6
-
-	echo -e " Creating configmap $1 $GREEN OK $EGREEN"
-	return 0
-}
-
 # This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
 # The function retries up to the timeout given in the cmd flag '--cluster-timeout'
 # args: <full kubectl cmd with parameters>
@@ -2294,12 +2292,14 @@
 		if [ $PRE_CLEAN -eq 1 ]; then
 			echo " Clean docker resouces to free up resources, may take time..."
 			../common/clean_docker.sh 2&>1 /dev/null
+			echo ""
 		fi
 	else
 		__clean_containers
 		if [ $PRE_CLEAN -eq 1 ]; then
-			echo " Clean kubernetes resouces to free up resources, may take time..."
+			echo " Cleaning kubernetes resouces to free up resources, may take time..."
 			../common/clean_kube.sh 2&>1 /dev/null
+			echo ""
 		fi
 	fi
 }
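
The new --gen-stats option launches ../common/genstat.sh in the background to sample container/pod resource usage into stat_data.csv. As an illustration only, here is a minimal Python sketch of the same sampling idea; genstat.sh remains the actual collector, the docker CLI is assumed to be on the path, and the CSV columns below are illustrative rather than the genstat.sh format:

    import csv, subprocess, time

    def sample_docker_stats(outfile, interval=5, samples=3):
        # Take one 'docker stats' snapshot per interval and append the rows to a CSV file.
        with open(outfile, "a", newline="") as f:
            writer = csv.writer(f)
            for _ in range(samples):
                snapshot = subprocess.run(
                    ["docker", "stats", "--no-stream", "--format", "{{.Name}},{{.CPUPerc}},{{.MemUsage}}"],
                    capture_output=True, text=True)
                now = int(time.time())
                for line in snapshot.stdout.splitlines():
                    writer.writerow([now] + line.split(","))
                time.sleep(interval)

    if __name__ == "__main__":
        sample_docker_stats("stat_data.csv")
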
diff --git a/test/cr/app/cr.py b/test/cr/app/cr.py
index 4b4d8da..94ef606 100644
--- a/test/cr/app/cr.py
+++ b/test/cr/app/cr.py
@@ -25,6 +25,7 @@
 import logging
 import socket
 from threading import RLock
+from hashlib import md5
 
 # Disable all logging of GET on reading counters and db
 class AjaxFilter(logging.Filter):
@@ -54,6 +55,7 @@
 # Request and response constants
 CALLBACK_URL="/callbacks/<string:id>"
 CALLBACK_MR_URL="/callbacks-mr/<string:id>" #Json list with string encoded items
+CALLBACK_TEXT_URL="/callbacks-text/<string:id>" # Callback for a plain text string
 APP_READ_URL="/get-event/<string:id>"
 APP_READ_ALL_URL="/get-all-events/<string:id>"
 DUMP_ALL_URL="/db"
@@ -111,7 +113,14 @@
                 cntr_callbacks[id][1]+=1
                 msg=msg_callbacks[id][0]
                 print("Fetching msg for id: "+id+", msg="+str(msg))
-                del msg[TIME_STAMP]
+
+                if (isinstance(msg,dict)):
+                    del msg[TIME_STAMP]
+                    if ("md5" in msg.keys()):
+                        print("EXTRACTED MD5")
+                        msg=msg["md5"]
+                        print("MD5: "+str(msg))
+
                 del msg_callbacks[id][0]
                 return json.dumps(msg),200
             print("No messages for id: "+id)
@@ -139,7 +148,8 @@
                 msg=msg_callbacks[id]
                 print("Fetching all msgs for id: "+id+", msg="+str(msg))
                 for sub_msg in msg:
-                    del sub_msg[TIME_STAMP]
+                    if (isinstance(sub_msg, dict)):
+                        del sub_msg[TIME_STAMP]
                 del msg_callbacks[id]
                 return json.dumps(msg),200
             print("No messages for id: "+id)
@@ -180,7 +190,8 @@
 
         with lock:
             cntr_msg_callbacks += 1
-            msg[TIME_STAMP]=str(datetime.now())
+            if (isinstance(msg, dict)):
+                msg[TIME_STAMP]=str(datetime.now())
             if (id in msg_callbacks.keys()):
                 msg_callbacks[id].append(msg)
             else:
@@ -202,8 +213,9 @@
     return 'OK',200
 
 
-# Receive a json callback message with payload fromatted accoirding to output frm the message router
-# URI and payload, (PUT or POST): /callbacks/<id> <json messages>
+# Receive a json callback message with payload formatted according to output from the message router
+# Array of stringified json objects
+# URI and payload, (PUT or POST): /callbacks-mr/<id> <json messages>
 # json is a list of string encoded json items
 # response: OK 200 or 500 for other errors
 @app.route(CALLBACK_MR_URL,
@@ -212,17 +224,21 @@
     global msg_callbacks
     global cntr_msg_callbacks
 
+    storeas=request.args.get('storeas') #If set, store payload as an md5 hash and don't log the payload
+                                        #Large payloads will otherwise overload the server
     try:
         print("Received callback (mr) for id: "+id +", content-type="+request.content_type)
-        remote_host_logging(request)
         print("raw data: str(request.data): "+str(request.data))
+        if (storeas is None):
+            print("raw data: str(request.data): "+str(request.data))
         do_delay()
         try:
             #if (request.content_type == MIME_JSON):
             if (MIME_JSON in request.content_type):
                 data = request.data
                 msg_list = json.loads(data)
-                print("Payload(json): "+str(msg_list))
+                if (storeas is None):
+                    print("Payload(json): "+str(msg_list))
             else:
                 msg_list=[]
                 print("Payload(content-type="+request.content_type+"). Setting empty json as payload")
@@ -234,11 +250,21 @@
         with lock:
             remote_host_logging(request)
             for msg in msg_list:
-                print("msg (str): "+str(msg))
-                msg=json.loads(msg)
-                print("msg (json): "+str(msg))
+                if (storeas is None):
+                    msg=json.loads(msg)
+                else:
+                    #Convert to compact json without whitespace between parameter and value
+                    #(whitespace seems to be added somewhere along the way to this server)
+                    msg=json.loads(msg)
+                    msg=json.dumps(msg, separators=(',', ':'))
+
+                    md5msg={}
+                    md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+                    msg=md5msg
+                    print("msg (json converted to md5 hash): "+str(msg["md5"]))
                 cntr_msg_callbacks += 1
-                msg[TIME_STAMP]=str(datetime.now())
+                if (isinstance(msg, dict)):
+                    msg[TIME_STAMP]=str(datetime.now())
                 if (id in msg_callbacks.keys()):
                     msg_callbacks[id].append(msg)
                 else:
@@ -259,6 +285,73 @@
 
     return 'OK',200
 
+# Receive a callback containing a single text message (content type ignored)
+# or a json array of strings (content type json)
+# URI and payload, (PUT or POST): /callbacks-text/<id> <text message>
+# response: OK 200 or 500 for other errors
+@app.route(CALLBACK_TEXT_URL,
+    methods=['PUT','POST'])
+def events_write_text(id):
+    global msg_callbacks
+    global cntr_msg_callbacks
+
+    storeas=request.args.get('storeas') #If set, store payload as an md5 hash and don't log the payload
+                                        #Large payloads will otherwise overload the server
+    try:
+        print("Received callback for id: "+id +", content-type="+request.content_type)
+        remote_host_logging(request)
+        if (storeas is None):
+            print("raw data: str(request.data): "+str(request.data))
+        do_delay()
+
+        try:
+            msg_list=None
+            if (MIME_JSON in request.content_type):  #Json array of strings
+                msg_list=json.loads(request.data)
+            else:
+                data=request.data.decode("utf-8")    #Assuming string
+                msg_list=[]
+                msg_list.append(data)
+
+            for msg in msg_list:
+                if (storeas == "md5"):
+                    md5msg={}
+                    print("msg: "+str(msg))
+                    print("msg (endcode str): "+str(msg.encode('utf-8')))
+                    md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+                    msg=md5msg
+                    print("msg (data converted to md5 hash): "+str(msg["md5"]))
+
+                if (isinstance(msg, dict)):
+                    msg[TIME_STAMP]=str(datetime.now())
+
+                with lock:
+                    cntr_msg_callbacks += 1
+                    if (id in msg_callbacks.keys()):
+                        msg_callbacks[id].append(msg)
+                    else:
+                        msg_callbacks[id]=[]
+                        msg_callbacks[id].append(msg)
+
+                    if (id in cntr_callbacks.keys()):
+                        cntr_callbacks[id][0] += 1
+                    else:
+                        cntr_callbacks[id]=[]
+                        cntr_callbacks[id].append(1)
+                        cntr_callbacks[id].append(0)
+        except Exception as e:
+            print(CAUGHT_EXCEPTION+str(e))
+            traceback.print_exc()
+            return 'NOTOK',500
+
+
+    except Exception as e:
+        print(CAUGHT_EXCEPTION+str(e))
+        traceback.print_exc()
+        return 'NOTOK',500
+
+    return 'OK',200
+
 ### Functions for test ###
 
 # Dump the whole db of current callbacks
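
The callback receiver now stores large MR payloads as an MD5 hash when the callback URL carries ?storeas=md5: each message is re-serialized as compact JSON (no whitespace) and only the hash of that string is kept. A standalone sketch of that transformation, with names that are illustrative rather than taken from cr.py:

    import json
    from hashlib import md5

    def to_md5_entry(raw_msg: str) -> dict:
        # Re-serialize to compact json so stray whitespace does not change the hash,
        # then keep only the md5 hex digest - mirrors the ?storeas=md5 handling above.
        compact = json.dumps(json.loads(raw_msg), separators=(',', ':'))
        return {"md5": md5(compact.encode('utf-8')).hexdigest()}

    print(to_md5_entry('{"event": "fault", "severity": "MAJOR"}'))

A test script can then compare this digest against the digest of the data it published, instead of transferring and logging the full payload.
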
diff --git a/test/cr/app/nginx.conf b/test/cr/app/nginx.conf
index e1b9ff9..32beca1 100644
--- a/test/cr/app/nginx.conf
+++ b/test/cr/app/nginx.conf
@@ -43,7 +43,10 @@
            proxy_set_header   X-Real-IP            $remote_addr;
            proxy_set_header   X-Forwarded-For      $proxy_add_x_forwarded_for;
            proxy_pass      http://localhost:2222;
+
+           client_max_body_size 0;
         }
+
     }
     ##
     # SSL Settings
diff --git a/test/mrstub/app/main.py b/test/mrstub/app/main.py
index fb6d674..4b1913f 100644
--- a/test/mrstub/app/main.py
+++ b/test/mrstub/app/main.py
@@ -69,11 +69,13 @@
 
 topic_write=""
 topic_read=""
+generic_topics_upload_baseurl=""
 
 uploader_thread=None
 downloader_thread=None
+generic_uploader_thread=None
 
-# Function to download messages from dmaap
+# Function to upload PMS messages to dmaap
 def dmaap_uploader():
     global msg_requests
     global cntr_msg_requests_fetched
@@ -107,7 +109,7 @@
         sleep(0.01)
 
 
-# Function to upload messages to dmaap
+# Function to download PMS messages from dmaap
 def dmaap_downloader():
     global msg_responses
     global cntr_msg_responses_submitted
@@ -150,6 +152,48 @@
         except Exception as e:
             sleep(1)
 
+# Function to upload generic messages to dmaap
+def dmaap_generic_uploader():
+    global msg_requests
+    global cntr_msg_requests_fetched
+
+    print("Starting generic uploader")
+
+    headers_json = {'Content-type': 'application/json', 'Accept': '*/*'}
+    headers_text = {'Content-type': 'text/plain', 'Accept': '*/*'}
+
+    while True:
+        if (len(generic_messages)):
+            for topicname in generic_messages.keys():    #topicname contains the path of the topic, e.g. "/events/<topic>"
+                topic_queue=generic_messages[topicname]
+                if (len(topic_queue)>0):
+                    if (topicname.endswith(".text")):
+                        msg=topic_queue[0]
+                        headers=headers_text
+                    else:
+                        msg=topic_queue[0]
+                        msg=json.dumps(msg)
+                        headers=headers_json
+                    url=generic_topics_upload_baseurl+topicname
+                    print("Sending to dmaap : "+ url)
+                    print("Sending to dmaap : "+ msg)
+                    print("Sending to dmaap : "+ str(headers))
+                    try:
+                        resp=requests.post(url, data=msg, headers=headers, timeout=10)
+                        if (resp.status_code < 200 or resp.status_code > 299):
+                            print("Failed, response code: " + str(resp.status_code))
+                            sleep(1)
+                        else:
+                            print("Dmaap response code: " + str(resp.status_code))
+                            print("Dmaap response text: " + str(resp.text))
+                            with lock:
+                                topic_queue.pop(0)
+                                cntr_msg_requests_fetched += 1
+                    except Exception as e:
+                        print("Failed, exception: "+ str(e))
+                        sleep(1)
+        sleep(0.01)
+
 #I'm alive function
 @app.route('/',
     methods=['GET'])
@@ -157,7 +201,7 @@
     return 'OK', 200
 
 
-# Helper function to create a Dmaap request message
+# Helper function to create a Dmaap PMS request message
 # args : <GET|PUT|DELETE> <correlation-id> <json-string-payload - may be None> <url>
 # response: json formatted string of a complete Dmaap message
 def create_message(operation, correlation_id, payload, url):
@@ -171,7 +215,7 @@
 
 ### MR-stub interface, for MR control
 
-# Send a message to MR
+# Send a PMS message to MR
 # URI and parameters (PUT or POST): /send-request?operation=<GET|PUT|POST|DELETE>&url=<url>
 # response: <correlation-id> (http 200) o4 400 for parameter error or 500 for other errors
 @app.route(APP_WRITE_URL,
@@ -212,7 +256,7 @@
             print(APP_WRITE_URL+"-"+CAUGHT_EXCEPTION+" "+str(e) + " "+traceback.format_exc())
             return Response(SERVER_ERROR+" "+str(e), status=500, mimetype=MIME_TEXT)
 
-# Receive a message response for MR for the included correlation id
+# Receive a PMS message response for MR for the included correlation id
 # URI and parameter, (GET): /receive-response?correlationid=<correlation-id>
 # response: <json-array of 1 response> 200 or empty 204 or other errors 500
 @app.route(APP_READ_URL,
@@ -243,7 +287,7 @@
 
 ### Dmaap interface ###
 
-# Read messages stream. URI according to agent configuration.
+# Read PMS messages stream. URI according to agent configuration.
 # URI, (GET): /events/A1-POLICY-AGENT-READ/users/policy-agent
 # response: 200 <json array of request messages>, or 500 for other errors
 @app.route(AGENT_READ_URL,
@@ -299,7 +343,7 @@
     print("timeout: "+str(timeout)+", start_time: "+str(start_time)+", current_time: "+str(current_time))
     return Response("[]", status=200, mimetype=MIME_JSON)
 
-# Write messages stream. URI according to agent configuration.
+# Write PMS messages stream. URI according to agent configuration.
 # URI and payload, (PUT or POST): /events/A1-POLICY-AGENT-WRITE <json array of response messages>
 # response: OK 200 or 400 for missing json parameters, 500 for other errors
 @app.route(AGENT_WRITE_URL,
@@ -367,10 +411,10 @@
         return Response(json.dumps(res), status=200, mimetype=MIME_JSON)
     return Response("[]", status=200, mimetype=MIME_JSON)
 
-# Generic POST/PUT catching all urls starting with /events/<topic>.
+# Generic POST catching all urls starting with /events/<topic>.
 # Writes the message in a que for that topic
 @app.route("/events/<path>",
-    methods=['PUT','POST'])
+    methods=['POST'])
 def generic_write(path):
     global generic_messages
     global cntr_msg_responses_submitted
@@ -378,8 +422,12 @@
     write_method=str(request.method)
     with lock:
         try:
-            payload=request.json
-            print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
+            if (urlkey.endswith(".text")):
+                payload=str(request.data.decode('UTF-8'))
+                print(write_method+" on "+urlkey+" text=" + payload)
+            else:
+                payload=request.json
+                print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
             topicmsgs=[]
             if (urlkey in generic_messages.keys()):
                 topicmsgs=generic_messages[urlkey]
@@ -407,6 +455,9 @@
     global generic_messages
     global cntr_msg_requests_fetched
 
+    if generic_topics_upload_baseurl:
+        return Response('Url not available when running as mrstub frontend', status=404, mimetype=MIME_TEXT)
+
     urlpath="/events/"+str(path)
     urlkey="/events/"+str(path).split("/")[0] #Extract topic
     print("GET on topic"+urlkey)
@@ -530,7 +581,14 @@
         uploader_thread=Thread(target=dmaap_uploader)
         uploader_thread.start()
 
-else:
+if os.getenv('GENERIC_TOPICS_UPLOAD_BASEURL') is not None:
+    print("GENERIC_TOPICS_UPLOAD_BASEURL:"+os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'])
+    generic_topics_upload_baseurl=os.environ['GENERIC_TOPICS_UPLOAD_BASEURL']
+    if generic_topics_upload_baseurl and generic_uploader_thread is None:
+        generic_uploader_thread=Thread(target=dmaap_generic_uploader)
+        generic_uploader_thread.start()
+
+if os.getenv("TOPIC_READ") is None or os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'] is None:
     print("No env variables - OK")
 
 if __name__ == "__main__":
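
With the generic topic support, the mrstub accepts POSTs on /events/<topic> and, when GENERIC_TOPICS_UPLOAD_BASEURL is set, the new uploader thread forwards queued messages to DMaaP MR, using text/plain for topics ending in ".text" and application/json otherwise. A hedged usage sketch; the mrstub address is an assumed local example and the topic names are taken from the test configuration:

    import requests

    MRSTUB_BASE = "http://localhost:3905"   # assumed mrstub address in a local docker run

    # Text topic: the body is queued and forwarded to DMaaP MR as-is with Content-type text/plain
    requests.post(MRSTUB_BASE + "/events/unauthenticated.dmaapadp_kafka.text",
                  data="sample fault event", headers={"Content-type": "text/plain"})

    # Json topic: the body is parsed and forwarded with Content-type application/json
    requests.post(MRSTUB_BASE + "/events/unauthenticated.dmaapadp.json",
                  json={"event": "fault", "severity": "MAJOR"})
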
diff --git a/test/mrstub/app/nginx.conf b/test/mrstub/app/nginx.conf
index c548e56..35b5ba0 100644
--- a/test/mrstub/app/nginx.conf
+++ b/test/mrstub/app/nginx.conf
@@ -39,7 +39,8 @@
 
         # serve dynamic requests
         location / {
-        proxy_pass      http://localhost:2222;
+            proxy_pass      http://localhost:2222;
+            client_max_body_size 0;
         }
     }
     ##
diff --git a/test/simulator-group/dmaapadp/application.yaml b/test/simulator-group/dmaapadp/application.yaml
index b20a9d7..f96db09 100644
--- a/test/simulator-group/dmaapadp/application.yaml
+++ b/test/simulator-group/dmaapadp/application.yaml
@@ -68,4 +68,7 @@
   configuration-filepath: /opt/app/dmaap-adaptor-service/data/application_configuration.json
   dmaap-base-url: $MR_SERVICE_PATH
   # The url used to adress this component. This is used as a callback url sent to other components.
-  dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
\ No newline at end of file
+  dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
+  # KAFKA bootstrap server. This is only needed if there are Information Types that use a kafkaInputTopic
+  kafka:
+    bootstrap-servers: $MR_KAFKA_SERVICE_PATH
diff --git a/test/simulator-group/dmaapadp/application_configuration.json b/test/simulator-group/dmaapadp/application_configuration.json
index b6605e3..e36d910 100644
--- a/test/simulator-group/dmaapadp/application_configuration.json
+++ b/test/simulator-group/dmaapadp/application_configuration.json
@@ -2,8 +2,13 @@
   "types": [
      {
         "id": "ExampleInformationType",
-        "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs",
+        "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=15000&limit=100",
         "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
-     }
+     },
+     {
+      "id": "ExampleInformationTypeKafka",
+      "kafkaInputTopic": "unauthenticated.dmaapadp_kafka.text",
+      "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
+   }
   ]
 }
\ No newline at end of file
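
The new ExampleInformationTypeKafka type makes the adapter consume from the Kafka topic unauthenticated.dmaapadp_kafka.text instead of a DMaaP topic URL. As a rough illustration of how a test could feed that topic directly, using the third-party kafka-python client as an assumed example and the broker address from the simulator group config:

    from kafka import KafkaProducer  # assumed client library, not part of the test env

    producer = KafkaProducer(bootstrap_servers="message-router-kafka:9092")  # example broker address
    producer.send("unauthenticated.dmaapadp_kafka.text", value=b"sample text message for the kafka job")
    producer.flush()
    producer.close()
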
diff --git a/test/simulator-group/dmaapadp/mnt/.gitignore b/test/simulator-group/dmaapadp/mnt/.gitignore
new file mode 100644
index 0000000..cdf0793
--- /dev/null
+++ b/test/simulator-group/dmaapadp/mnt/.gitignore
@@ -0,0 +1,17 @@
+################################################################################
+#   Copyright (c) 2021 Nordix Foundation.                                      #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+################################################################################
+*
+!.gitignore
diff --git a/test/simulator-group/dmaapmed/app.yaml b/test/simulator-group/dmaapmed/app.yaml
index e0296fa..aa8a0f1 100644
--- a/test/simulator-group/dmaapmed/app.yaml
+++ b/test/simulator-group/dmaapmed/app.yaml
@@ -40,7 +40,7 @@
         - name: DMAAP_MR_ADDR
           value: "$MR_SERVICE_PATH"
         - name: LOG_LEVEL
-          value: "Debug"
+          value: Debug
       volumes:
       - configMap:
           defaultMode: 420
diff --git a/test/simulator-group/dmaapmed/docker-compose.yml b/test/simulator-group/dmaapmed/docker-compose.yml
index 21fe551..d0672df 100644
--- a/test/simulator-group/dmaapmed/docker-compose.yml
+++ b/test/simulator-group/dmaapmed/docker-compose.yml
@@ -32,7 +32,7 @@
       - INFO_PRODUCER_PORT=${DMAAP_MED_CONF_SELF_PORT}
       - INFO_COORD_ADDR=${ECS_SERVICE_PATH}
       - DMAAP_MR_ADDR=${MR_SERVICE_PATH}
-      - LOG_LEVEL="Debug"
+      - LOG_LEVEL=Debug
     volumes:
     - ${DMAAP_MED_HOST_MNT_DIR}/$DMAAP_MED_DATA_FILE:${DMAAP_MED_DATA_MOUNT_PATH}/$DMAAP_MED_DATA_FILE
     labels:
diff --git a/test/simulator-group/dmaapmed/mnt/.gitignore b/test/simulator-group/dmaapmed/mnt/.gitignore
new file mode 100644
index 0000000..b94353c
--- /dev/null
+++ b/test/simulator-group/dmaapmed/mnt/.gitignore
@@ -0,0 +1,17 @@
+################################################################################
+#   Copyright (c) 2021 Nordix Foundation.                                      #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+################################################################################
+*
+!.gitignore
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmed/type_config.json b/test/simulator-group/dmaapmed/type_config.json
index 8a67226..ddb776f 100644
--- a/test/simulator-group/dmaapmed/type_config.json
+++ b/test/simulator-group/dmaapmed/type_config.json
@@ -3,7 +3,7 @@
      [
        {
          "id": "STD_Fault_Messages",
-         "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages"
+         "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=15000&limit=100"
        }
    ]
  }
\ No newline at end of file
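
The dmaapTopicUrl now carries timeout=15000 and limit=100, so each poll against MR long-polls for up to 15 s and fetches at most 100 messages. The equivalent consume call, sketched with the requests library and a placeholder base URL:

    import requests

    MR_BASE = "http://localhost:3904"  # placeholder for the MR service path
    url = MR_BASE + "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages"
    resp = requests.get(url, params={"timeout": 15000, "limit": 100})  # long-poll up to 15 s, max 100 msgs
    print(resp.status_code, resp.json())
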
diff --git a/test/simulator-group/dmaapmr/app.yaml b/test/simulator-group/dmaapmr/app.yaml
index 2b39d15..a4ecc91 100644
--- a/test/simulator-group/dmaapmr/app.yaml
+++ b/test/simulator-group/dmaapmr/app.yaml
@@ -1,24 +1,24 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: $MR_DMAAP_KUBE_APP_NAME
+  name: $MR_DMAAP_APP_NAME
   namespace: $KUBE_ONAP_NAMESPACE
   labels:
-    run: $MR_DMAAP_KUBE_APP_NAME
+    run: $MR_DMAAP_APP_NAME
     autotest: DMAAPMR
 spec:
   replicas: 1
   selector:
     matchLabels:
-      run: $MR_DMAAP_KUBE_APP_NAME
+      run: $MR_DMAAP_APP_NAME
   template:
     metadata:
       labels:
-        run: $MR_DMAAP_KUBE_APP_NAME
+        run: $MR_DMAAP_APP_NAME
         autotest: DMAAPMR
     spec:
       containers:
-      - name: $MR_DMAAP_KUBE_APP_NAME
+      - name: $MR_DMAAP_APP_NAME
         image: $ONAP_DMAAPMR_IMAGE
         imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
         ports:
@@ -33,11 +33,9 @@
         - mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
           subPath: MsgRtrApi.properties
           name: dmaapmr-msg-rtr-api
-        volumeMounts:
         - mountPath: /appl/dmaapMR1/bundleconfig/etc/logback.xml
           subPath: logback.xml
           name: dmaapmr-log-back
-        volumeMounts:
         - mountPath: /appl/dmaapMR1/etc/cadi.properties
           subPath: cadi.properties
           name: dmaapmr-cadi
@@ -58,34 +56,34 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: $MR_KAFKA_BWDS_NAME
+  name: $MR_KAFKA_APP_NAME
   namespace: $KUBE_ONAP_NAMESPACE
   labels:
-    run: $MR_KAFKA_BWDS_NAME
+    run: $MR_KAFKA_APP_NAME
     autotest: DMAAPMR
 spec:
   replicas: 1
   selector:
     matchLabels:
-      run: $MR_KAFKA_BWDS_NAME
+      run: $MR_KAFKA_APP_NAME
   template:
     metadata:
       labels:
-        run: $MR_KAFKA_BWDS_NAME
+        run: $MR_KAFKA_APP_NAME
         autotest: DMAAPMR
     spec:
       containers:
-      - name: $MR_KAFKA_BWDS_NAME
+      - name: $MR_KAFKA_APP_NAME
         image: $ONAP_KAFKA_IMAGE
         imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
         ports:
         - name: http
-          containerPort: 9095
+          containerPort: $MR_KAFKA_PORT
         env:
         - name: enableCadi
           value: 'false'
         - name: KAFKA_ZOOKEEPER_CONNECT
-          value: 'zookeeper.onap:2181'
+          value: '$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT'
         - name: KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS
           value: '40000'
         - name: KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS
@@ -93,11 +91,11 @@
         - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
           value: 'INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT'
         - name: KAFKA_ADVERTISED_LISTENERS
-          value: 'INTERNAL_PLAINTEXT://kaka:9092'
-#        - name: KAFKA_ADVERTISED_LISTENERS
-#          value: 'INTERNAL_PLAINTEXT://localhost:9092'
+          value: 'INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT'
         - name: KAFKA_LISTENERS
-          value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9095,INTERNAL_PLAINTEXT://0.0.0.0:9092'
+          value: 'INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT'
+        # - name: KAFKA_LISTENERS
+        #   value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9091,INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT'
         - name: KAFKA_INTER_BROKER_LISTENER_NAME
           value: INTERNAL_PLAINTEXT
         - name: KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE
@@ -105,12 +103,11 @@
         - name: KAFKA_OPTS
           value: '-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf'
         - name: KAFKA_ZOOKEEPER_SET_ACL
-          value: 'true'
+          value: 'false'
         - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
           value: '1'
         - name: KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS
           value: '1'
-
         volumeMounts:
         - mountPath: /etc/kafka/secrets/jaas/zk_client_jaas.conf
           subPath: zk_client_jaas.conf
@@ -146,7 +143,7 @@
         imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
         ports:
         - name: http
-          containerPort: 2181
+          containerPort: $MR_ZOOKEEPER_PORT
         env:
         - name: ZOOKEEPER_REPLICAS
           value: '1'
@@ -163,7 +160,7 @@
         - name: ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL
           value: '24'
         - name: ZOOKEEPER_CLIENT_PORT
-          value: '2181'
+          value: '$MR_ZOOKEEPER_PORT'
         - name: KAFKA_OPTS
           value: '-Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl'
         - name: ZOOKEEPER_SERVER_ID
diff --git a/test/simulator-group/dmaapmr/mnt2/kafka/zk_client_jaas.conf b/test/simulator-group/dmaapmr/configs/kafka/zk_client_jaas.conf
similarity index 100%
rename from test/simulator-group/dmaapmr/mnt2/kafka/zk_client_jaas.conf
rename to test/simulator-group/dmaapmr/configs/kafka/zk_client_jaas.conf
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties b/test/simulator-group/dmaapmr/configs/mr/MsgRtrApi.properties
similarity index 96%
rename from test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties
rename to test/simulator-group/dmaapmr/configs/mr/MsgRtrApi.properties
index 4764321..3e0b001 100644
--- a/test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties
+++ b/test/simulator-group/dmaapmr/configs/mr/MsgRtrApi.properties
@@ -1,6 +1,7 @@
 # LICENSE_START=======================================================
 #  org.onap.dmaap
 #  ================================================================================
+#  Copyright © 2021 Nordix Foundation. All rights reserved.
 #  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 #  ================================================================================
 #  Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,7 +35,7 @@
 ##
 ## Both Cambria and Kafka make use of Zookeeper.
 ##
-config.zk.servers=zookeeper:2181
+config.zk.servers=$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT
 
 ###############################################################################
 ##
@@ -45,7 +46,7 @@
 ##        if you want to change request.required.acks it can take this one value
 #kafka.metadata.broker.list=localhost:9092,localhost:9093
 #kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kafka:9092
+kafka.metadata.broker.list=$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT
 ##kafka.request.required.acks=-1
 #kafka.client.zookeeper=${config.zk.servers}
 consumer.timeout.ms=100
@@ -135,7 +136,7 @@
 cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
 consumer.timeout=17
 default.partitions=3
-default.replicas=3
+default.replicas=1
 ##############################################################################
 #100mb
 maxcontentlength=10000
diff --git a/test/simulator-group/dmaapmr/configs/mr/cadi.properties b/test/simulator-group/dmaapmr/configs/mr/cadi.properties
new file mode 100644
index 0000000..6178e42
--- /dev/null
+++ b/test/simulator-group/dmaapmr/configs/mr/cadi.properties
@@ -0,0 +1,38 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+#Removed to disable aaf in test env
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
+aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
+aaf_env=DEV
+aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
+
+#Removed to disable aaf in test env
+# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
+
+cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
+
+cadi_alias=dmaapmr@mr.dmaap.onap.org
+cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
+cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
+
+cadi_loglevel=INFO
+cadi_protocols=TLSv1.1,TLSv1.2
+cadi_latitude=37.78187
+cadi_longitude=-122.26147
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/logback.xml b/test/simulator-group/dmaapmr/configs/mr/logback.xml
similarity index 98%
rename from test/simulator-group/dmaapmr/mnt2/mr/logback.xml
rename to test/simulator-group/dmaapmr/configs/mr/logback.xml
index f02a2db..e60e8da 100644
--- a/test/simulator-group/dmaapmr/mnt2/mr/logback.xml
+++ b/test/simulator-group/dmaapmr/configs/mr/logback.xml
@@ -1,5 +1,6 @@
 <!--
      ============LICENSE_START=======================================================
+     Copyright © 2021 Nordix Foundation. All rights reserved.
      Copyright © 2019 AT&T Intellectual Property. All rights reserved.
      ================================================================================
      Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/test/simulator-group/dmaapmr/mnt2/zk/zk_server_jaas.conf b/test/simulator-group/dmaapmr/configs/zk/zk_server_jaas.conf
similarity index 100%
rename from test/simulator-group/dmaapmr/mnt2/zk/zk_server_jaas.conf
rename to test/simulator-group/dmaapmr/configs/zk/zk_server_jaas.conf
diff --git a/test/simulator-group/dmaapmr/docker-compose.yml b/test/simulator-group/dmaapmr/docker-compose.yml
index 6b5c9c2..f9a5f21 100644
--- a/test/simulator-group/dmaapmr/docker-compose.yml
+++ b/test/simulator-group/dmaapmr/docker-compose.yml
@@ -26,7 +26,7 @@
     image: $ONAP_ZOOKEEPER_IMAGE
     container_name: $MR_ZOOKEEPER_APP_NAME
     ports:
-      - "2181:2181"
+      - "$MR_ZOOKEEPER_PORT:$MR_ZOOKEEPER_PORT"
     environment:
      ZOOKEEPER_REPLICAS: 1
      ZOOKEEPER_TICK_TIME: 2000
@@ -35,7 +35,7 @@
      ZOOKEEPER_MAX_CLIENT_CNXNS: 200
      ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT: 3
      ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: 24
-     ZOOKEEPER_CLIENT_PORT: 2181
+     ZOOKEEPER_CLIENT_PORT: $MR_ZOOKEEPER_PORT
      KAFKA_OPTS: -Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl -Dzookeeper.4lw.commands.whitelist=*
      ZOOKEEPER_SERVER_ID: 1
     volumes:
@@ -50,15 +50,15 @@
    image: $ONAP_KAFKA_IMAGE
    container_name: $MR_KAFKA_APP_NAME
    ports:
-    - "9092:9092"
+    - "$MR_KAFKA_PORT:$MR_KAFKA_PORT"
    environment:
     enableCadi: 'false'
-    KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+    KAFKA_ZOOKEEPER_CONNECT: $MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT
     KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 40000
     KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: 40000
     KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT
-    KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://kafka:9092
-    KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:9092
+    KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT
+    KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT
     KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL_PLAINTEXT
     KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
     KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf
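
KAFKA_ADVERTISED_LISTENERS now advertises $MR_KAFKA_APP_NAME:$MR_KAFKA_PORT instead of a hard-coded host name, so clients that resolve the container name are handed an address they can actually reach. A minimal hedged check with kafka-python (assumed client, example values):

    from kafka import KafkaConsumer  # assumed client library, example only

    # The bootstrap address must match the advertised listener; otherwise the broker
    # hands back an address the client cannot resolve and consumption silently stalls.
    consumer = KafkaConsumer("unauthenticated.dmaapadp_kafka.text",
                             bootstrap_servers="message-router-kafka:9092",
                             auto_offset_reset="earliest",
                             consumer_timeout_ms=5000)
    for record in consumer:
        print(record.value)
    consumer.close()
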
diff --git a/test/simulator-group/dmaapmr/mnt/.gitignore b/test/simulator-group/dmaapmr/mnt/.gitignore
new file mode 100644
index 0000000..b94353c
--- /dev/null
+++ b/test/simulator-group/dmaapmr/mnt/.gitignore
@@ -0,0 +1,17 @@
+################################################################################
+#   Copyright (c) 2021 Nordix Foundation.                                      #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+################################################################################
+*
+!.gitignore
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/kafka/zk_client_jaas.conf b/test/simulator-group/dmaapmr/mnt/kafka/zk_client_jaas.conf
index dca46d5..79a7601 100644
--- a/test/simulator-group/dmaapmr/mnt/kafka/zk_client_jaas.conf
+++ b/test/simulator-group/dmaapmr/mnt/kafka/zk_client_jaas.conf
@@ -1,6 +1,5 @@
 Client {
-   org.apache.zookeeper.server.auth.DigestLoginModule required
-   username="kafka"
-   password="kafka_secret";
- };
-
+  org.apache.zookeeper.server.auth.DigestLoginModule required
+  username="kafka"
+  password="kafka_secret";
+ };
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties
deleted file mode 100644
index e174b6f..0000000
--- a/test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties
+++ /dev/null
@@ -1,174 +0,0 @@
-# LICENSE_START=======================================================
-#  org.onap.dmaap
-#  ================================================================================
-#  Copyright © 2020 Nordix Foundation. All rights reserved.
-#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-#  ================================================================================
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#  ============LICENSE_END=========================================================
-#
-#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-###############################################################################
-###############################################################################
-##
-## Cambria API Server config
-##
-## Default values are shown as commented settings.
-##
-###############################################################################
-##
-## HTTP service
-##
-## 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-#config.zk.servers=172.18.1.1
-#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
-config.zk.servers=zookeeper.onap:2181
-
-#config.zk.root=/fe3c/cambria/config
-
-
-###############################################################################
-##
-## Kafka Connection
-##
-##        Items below are passed through to Kafka's producer and consumer
-##        configurations (after removing "kafka.")
-##        if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=akfak-bwds.onap:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=20000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-#(backoff*retries > zksessiontimeout)
-kafka.rebalance.backoff.ms=10000
-kafka.rebalance.max.retries=6
-
-
-###############################################################################
-##
-##        Secured Config
-##
-##        Some data stored in the config system is sensitive -- API keys and secrets,
-##        for example. to protect it, we use an encryption layer for this section
-##        of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-##        Kafka expects live connections from the consumer to the broker, which
-##        obviously doesn't work over connectionless HTTP requests. The Cambria
-##        server proxies HTTP requests into Kafka consumer sessions that are kept
-##        around for later re-use. Not doing so is costly for setup per request,
-##        which would substantially impact a high volume consumer's performance.
-##
-##        This complicates Cambria server failover, because we often need server
-##        A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-cambria.consumer.cache.touchFreqMs=120000
-##stickforallconsumerrequests=false
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-#cambria.rateLimit.maxEmptyPollsPerMinute=30
-#cambria.rateLimitActual.delay.ms=10
-
-###############################################################################
-##
-## Metrics Reporting
-##
-##        This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics                                  #msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-consumer.timeout=17
-default.partitions=3
-default.replicas=3
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
-msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-enforced.topic.name.AAF=org.onap.dmaap.mr
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.onap.dmaap.mr
-##############################################################################
-#Mirror Maker Agent
-
-msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
-
-kafka.max.poll.interval.ms=300000
-kafka.heartbeat.interval.ms=60000
-kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
diff --git a/test/simulator-group/dmaapmr/mnt/mr/MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt/mr/MsgRtrApi.properties
index dc5ddd7..3476d5d 100644
--- a/test/simulator-group/dmaapmr/mnt/mr/MsgRtrApi.properties
+++ b/test/simulator-group/dmaapmr/mnt/mr/MsgRtrApi.properties
@@ -1,8 +1,7 @@
 # LICENSE_START=======================================================
 #  org.onap.dmaap
 #  ================================================================================
-#  Copyright © 2020 Nordix Foundation. All rights reserved.
-#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 #  ================================================================================
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -35,13 +34,8 @@
 ##
 ## Both Cambria and Kafka make use of Zookeeper.
 ##
-#config.zk.servers=172.18.1.1
-#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
 config.zk.servers=zookeeper:2181
 
-#config.zk.root=/fe3c/cambria/config
-
-
 ###############################################################################
 ##
 ## Kafka Connection
@@ -51,7 +45,7 @@
 ##        if you want to change request.required.acks it can take this one value
 #kafka.metadata.broker.list=localhost:9092,localhost:9093
 #kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kafka:9092
+kafka.metadata.broker.list=message-router-kafka:9092
 ##kafka.request.required.acks=-1
 #kafka.client.zookeeper=${config.zk.servers}
 consumer.timeout.ms=100
@@ -87,8 +81,6 @@
 cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
 cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
 authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
 
 
 ###############################################################################
@@ -136,13 +128,14 @@
 ##        This server can report its metrics periodically on a topic.
 ##
 #metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics                                  #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.topic=cambria.apinode.metrics
+#msgrtr.apinode.metrics.dmaap
 #metrics.send.cambria.sendEverySeconds=60
 
 cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
 consumer.timeout=17
 default.partitions=3
-default.replicas=3
+default.replicas=1
 ##############################################################################
 #100mb
 maxcontentlength=10000
@@ -170,5 +163,4 @@
 kafka.max.poll.interval.ms=300000
 kafka.heartbeat.interval.ms=60000
 kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
+kafka.max.poll.records=1000
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/mr/cadi.properties b/test/simulator-group/dmaapmr/mnt/mr/cadi.properties
index 4d28b52..cccfbdf 100644
--- a/test/simulator-group/dmaapmr/mnt/mr/cadi.properties
+++ b/test/simulator-group/dmaapmr/mnt/mr/cadi.properties
@@ -1,20 +1,21 @@
-aaf_locate_url=https://aaf-locate.{{ include "common.namespace" . }}:8095
+#Removed to disable aaf in test env
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
 aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
 aaf_env=DEV
 aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
 
-cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
-cadi_truststore_password=enc:mN6GiIzFQxKGDzAXDOs7b4j8DdIX02QrZ9QOWNRpxV3rD6whPCfizSMZkJwxi_FJ
+#Removed to disable aaf in test env
+# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
 
 cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
 
 cadi_alias=dmaapmr@mr.dmaap.onap.org
 cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=enc:_JJT2gAEkRzXla5xfDIHal8pIoIB5iIos3USvZQT6sL-l14LpI5fRFR_QIGUCh5W
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
 cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
 
 cadi_loglevel=INFO
 cadi_protocols=TLSv1.1,TLSv1.2
 cadi_latitude=37.78187
-cadi_longitude=-122.26147
-
+cadi_longitude=-122.26147
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/mr/logback.xml b/test/simulator-group/dmaapmr/mnt/mr/logback.xml
index 02499fb..f02a2db 100644
--- a/test/simulator-group/dmaapmr/mnt/mr/logback.xml
+++ b/test/simulator-group/dmaapmr/mnt/mr/logback.xml
@@ -1,7 +1,6 @@
 <!--
      ============LICENSE_START=======================================================
-     Copyright © 2020 Nordix Foundation. All rights reserved.
-     Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+     Copyright © 2019 AT&T Intellectual Property. All rights reserved.
      ================================================================================
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
@@ -207,4 +206,3 @@
   </root>
 
 </configuration>
-
diff --git a/test/simulator-group/dmaapmr/mnt/zk/zk_server_jaas.conf b/test/simulator-group/dmaapmr/mnt/zk/zk_server_jaas.conf
index 9a32a72..3d2767f 100644
--- a/test/simulator-group/dmaapmr/mnt/zk/zk_server_jaas.conf
+++ b/test/simulator-group/dmaapmr/mnt/zk/zk_server_jaas.conf
@@ -1,5 +1,4 @@
 Server {
        org.apache.zookeeper.server.auth.DigestLoginModule required
-       user_kafka=kafka_secret;
-};
-
+       user_kafka="kafka_secret";
+};
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties
deleted file mode 100644
index 7f7bc41..0000000
--- a/test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties
+++ /dev/null
@@ -1,166 +0,0 @@
-# LICENSE_START=======================================================
-#  org.onap.dmaap
-#  ================================================================================
-#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-#  ================================================================================
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#  ============LICENSE_END=========================================================
-#
-#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-###############################################################################
-###############################################################################
-##
-## Cambria API Server config
-##
-## Default values are shown as commented settings.
-##
-###############################################################################
-##
-## HTTP service
-##
-## 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-config.zk.servers=zookeeper:2181
-
-###############################################################################
-##
-## Kafka Connection
-##
-##        Items below are passed through to Kafka's producer and consumer
-##        configurations (after removing "kafka.")
-##        if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kaka:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=20000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-#(backoff*retries > zksessiontimeout)
-kafka.rebalance.backoff.ms=10000
-kafka.rebalance.max.retries=6
-
-
-###############################################################################
-##
-##        Secured Config
-##
-##        Some data stored in the config system is sensitive -- API keys and secrets,
-##        for example. to protect it, we use an encryption layer for this section
-##        of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-##        Kafka expects live connections from the consumer to the broker, which
-##        obviously doesn't work over connectionless HTTP requests. The Cambria
-##        server proxies HTTP requests into Kafka consumer sessions that are kept
-##        around for later re-use. Not doing so is costly for setup per request,
-##        which would substantially impact a high volume consumer's performance.
-##
-##        This complicates Cambria server failover, because we often need server
-##        A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-cambria.consumer.cache.touchFreqMs=120000
-##stickforallconsumerrequests=false
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-#cambria.rateLimit.maxEmptyPollsPerMinute=30
-#cambria.rateLimitActual.delay.ms=10
-
-###############################################################################
-##
-## Metrics Reporting
-##
-##        This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics
-#msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-consumer.timeout=17
-default.partitions=3
-default.replicas=3
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
-msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-enforced.topic.name.AAF=org.onap.dmaap.mr
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.onap.dmaap.mr
-##############################################################################
-#Mirror Maker Agent
-
-msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
-
-kafka.max.poll.interval.ms=300000
-kafka.heartbeat.interval.ms=60000
-kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
\ No newline at end of file
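Note on the deleted MsgRtrApi.properties above: the consumer-cache section describes how the message router proxies stateless HTTP consume requests onto cached Kafka consumer sessions, keyed by consumer group and consumer id and swept after a period of inactivity (the cambria.consumer.cache.* settings). A minimal sketch of the REST calls this caching serves, using placeholder base URL, topic and group/id values rather than anything from this change:

    # Publish one message to a topic (placeholder values throughout)
    MR_BASE_URL="http://localhost:3904"        # assumption: default MR HTTP port
    TOPIC="unauthenticated.example.json"       # placeholder topic name

    curl -sS -X POST "$MR_BASE_URL/events/$TOPIC" \
         -H "Content-Type: application/json" \
         -d '{"msg":"example"}'

    # Consume as <group>/<id>; the server keeps one cached Kafka consumer per
    # group/id pair, which is what sweepFreqSeconds/touchFreqMs above tune
    curl -sS "$MR_BASE_URL/events/$TOPIC/examplegroup/exampleid?timeout=15000&limit=100"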
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/cadi.properties b/test/simulator-group/dmaapmr/mnt2/mr/cadi.properties
deleted file mode 100644
index 3cd26ad..0000000
--- a/test/simulator-group/dmaapmr/mnt2/mr/cadi.properties
+++ /dev/null
@@ -1,19 +0,0 @@
-#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
-aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
-aaf_env=DEV
-aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
-
-cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
-cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
-
-cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
-
-cadi_alias=dmaapmr@mr.dmaap.onap.org
-cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
-cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
-
-cadi_loglevel=INFO
-cadi_protocols=TLSv1.1,TLSv1.2
-cadi_latitude=37.78187
-cadi_longitude=-122.26147
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/svc.yaml b/test/simulator-group/dmaapmr/svc.yaml
index e5d5d8e..7fb0962 100644
--- a/test/simulator-group/dmaapmr/svc.yaml
+++ b/test/simulator-group/dmaapmr/svc.yaml
@@ -1,10 +1,10 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: $MR_DMAAP_KUBE_APP_NAME
+  name: $MR_DMAAP_APP_NAME
   namespace: $KUBE_ONAP_NAMESPACE
   labels:
-    run: $MR_DMAAP_KUBE_APP_NAME
+    run: $MR_DMAAP_APP_NAME
     autotest: DMAAPMR
 spec:
   type: ClusterIP
@@ -18,25 +18,25 @@
     protocol: TCP
     name: https
   selector:
-    run: $MR_DMAAP_KUBE_APP_NAME
+    run: $MR_DMAAP_APP_NAME
 ---
 apiVersion: v1
 kind: Service
 metadata:
-  name: $MR_KAFKA_BWDS_NAME
+  name: $MR_KAFKA_APP_NAME
   namespace: $KUBE_ONAP_NAMESPACE
   labels:
-    run: $MR_KAFKA_BWDS_NAME
+    run: $MR_KAFKA_APP_NAME
     autotest: DMAAPMR
 spec:
   type: ClusterIP
   ports:
-  - port: 9092
-    targetPort: 9095
+  - port: $MR_KAFKA_PORT
+    targetPort: $MR_KAFKA_PORT
     protocol: TCP
     name: http
   selector:
-    run: $MR_KAFKA_BWDS_NAME
+    run: $MR_KAFKA_APP_NAME
 ---
 apiVersion: v1
 kind: Service
@@ -49,87 +49,9 @@
 spec:
   type: ClusterIP
   ports:
-  - port: 2181
-    targetPort: 2181
+  - port: $MR_ZOOKEEPER_PORT
+    targetPort: $MR_ZOOKEEPER_PORT
     protocol: TCP
     name: http
   selector:
     run: $MR_ZOOKEEPER_APP_NAME
-
-
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: dmaap-mr
-#   namespace: $KUBE_ONAP_NAMESPACE
-#   labels:
-#     run: $MR_DMAAP_KUBE_APP_NAME
-#     autotest: DMAAPMR
-# spec:
-#   type: ClusterIP
-#   ports:
-#   - port: $MR_EXTERNAL_PORT
-#     targetPort: $MR_INTERNAL_PORT
-#     protocol: TCP
-#     name: http
-#   - port: $MR_EXTERNAL_SECURE_PORT
-#     targetPort: $MR_INTERNAL_SECURE_PORT
-#     protocol: TCP
-#     name: https
-#   selector:
-#     run: $MR_DMAAP_KUBE_APP_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: dmaap-kafka
-#   namespace: $KUBE_ONAP_NAMESPACE
-#   labels:
-#     run: $MR_KAFKA_BWDS_NAME
-#     autotest: DMAAPMR
-# spec:
-#   type: ClusterIP
-#   ports:
-#   - port: 9092
-#     targetPort: 9092
-#     protocol: TCP
-#     name: http
-#   selector:
-#     run: $MR_KAFKA_BWDS_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: kafka
-#   namespace: $KUBE_ONAP_NAMESPACE
-#   labels:
-#     run: $MR_KAFKA_BWDS_NAME
-#     autotest: DMAAPMR
-# spec:
-#   type: ClusterIP
-#   ports:
-#   - port: 9092
-#     targetPort: 9092
-#     protocol: TCP
-#     name: http
-#   selector:
-#     run: $MR_KAFKA_BWDS_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: dmaap-zookeeper
-#   namespace: $KUBE_ONAP_NAMESPACE
-#   labels:
-#     run: $MR_ZOOKEEPER_APP_NAME
-#     autotest: DMAAPMR
-# spec:
-#   type: ClusterIP
-#   ports:
-#   - port: 2181
-#     targetPort: 2181
-#     protocol: TCP
-#     name: http
-#   selector:
-    run: $MR_ZOOKEEPER_APP_NAME
\ No newline at end of file
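The service template now takes the Kafka and Zookeeper ports from $MR_KAFKA_PORT and $MR_ZOOKEEPER_PORT instead of hardcoding 9092/2181, and drops the commented-out service variants that still referenced the old $MR_DMAAP_KUBE_APP_NAME/$MR_KAFKA_BWDS_NAME names. A minimal sketch of expanding and applying the parameterized template, assuming plain envsubst and example values; the test framework's own substitution helper and actual variable values may differ:

    # Example values only; the names are assumptions, the ports match the
    # defaults that were previously hardcoded in this template.
    export KUBE_ONAP_NAMESPACE="nonrtric"
    export MR_DMAAP_APP_NAME="message-router"
    export MR_KAFKA_APP_NAME="message-router-kafka"
    export MR_ZOOKEEPER_APP_NAME="message-router-zookeeper"
    export MR_KAFKA_PORT=9092
    export MR_ZOOKEEPER_PORT=2181

    envsubst < test/simulator-group/dmaapmr/svc.yaml | kubectl apply -f -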
diff --git a/test/simulator-group/mrstub/app.yaml b/test/simulator-group/mrstub/app.yaml
index 0cf0f51..696af4e 100644
--- a/test/simulator-group/mrstub/app.yaml
+++ b/test/simulator-group/mrstub/app.yaml
@@ -30,4 +30,6 @@
         - name: TOPIC_READ
           value: $TOPIC_READ
         - name: TOPIC_WRITE
-          value: $TOPIC_WRITE
\ No newline at end of file
+          value: $TOPIC_WRITE
+        - name: GENERIC_TOPICS_UPLOAD_BASEURL
+          value: $GENERIC_TOPICS_UPLOAD_BASEURL
\ No newline at end of file
diff --git a/test/simulator-group/mrstub/docker-compose.yml b/test/simulator-group/mrstub/docker-compose.yml
index 9101b5b..a1c96c0 100644
--- a/test/simulator-group/mrstub/docker-compose.yml
+++ b/test/simulator-group/mrstub/docker-compose.yml
@@ -34,6 +34,7 @@
     environment:
       - TOPIC_READ=${TOPIC_READ}
       - TOPIC_WRITE=${TOPIC_WRITE}
+      - GENERIC_TOPICS_UPLOAD_BASEURL=${GENERIC_TOPICS_UPLOAD_BASEURL}
     labels:
       - "nrttest_app=MR"
       - "nrttest_dp=${MR_STUB_DISPLAY_NAME}"
diff --git a/test/simulator-group/sdnc/app.yaml b/test/simulator-group/sdnc/app.yaml
index 45f0f08..c794e67 100644
--- a/test/simulator-group/sdnc/app.yaml
+++ b/test/simulator-group/sdnc/app.yaml
@@ -2,7 +2,7 @@
 kind: Deployment
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -46,7 +46,7 @@
 kind: Deployment
 metadata:
   name: $SDNC_DB_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
diff --git a/test/simulator-group/sdnc/app2.yaml b/test/simulator-group/sdnc/app2.yaml
index 8861fe0..1824cd1 100644
--- a/test/simulator-group/sdnc/app2.yaml
+++ b/test/simulator-group/sdnc/app2.yaml
@@ -2,7 +2,7 @@
 kind: Deployment
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -62,7 +62,7 @@
 kind: Deployment
 metadata:
   name: $SDNC_DB_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
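The SDNC manifests here and in the service file that follows consistently correct the misspelled namespace variable KUBE_SNDC_NAMESPACE to KUBE_SDNC_NAMESPACE. A one-line, hedged check that no stale references remain (run from the repo root; the search path is an assumption):

    grep -rn 'KUBE_SNDC_NAMESPACE' test/ && echo "stale references remain" || echo "rename complete"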
diff --git a/test/simulator-group/sdnc/svc.yaml b/test/simulator-group/sdnc/svc.yaml
index 45af8b6..f172c1c 100644
--- a/test/simulator-group/sdnc/svc.yaml
+++ b/test/simulator-group/sdnc/svc.yaml
@@ -2,7 +2,7 @@
 kind: Service
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -24,7 +24,7 @@
 kind: Service
 metadata:
   name: dbhost
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
@@ -42,7 +42,7 @@
 kind: Service
 metadata:
   name: sdnctldb01
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC