Upgraded test env with Kubernetes support
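
This change adds a docker-compose file, Kubernetes manifests (app.yaml,
svc.yaml) and config files for running DMAAP Message Router, Kafka and
Zookeeper as simulators in the test environment. The manifests contain
$VAR placeholders (app names, namespace, images, ports) that are
expected to be filled in by the test scripts before deployment. Minimal
usage sketch, assuming the placeholders are rendered with envsubst and
that the variable values shown are examples only:

    # Kubernetes: set the KUBE_*/MR_*/ONAP_* variables (example below),
    # render the placeholders and apply the manifests
    export KUBE_ONAP_NAMESPACE=onap
    envsubst < test/simulator-group/dmaapmr/app.yaml | kubectl apply -f -
    envsubst < test/simulator-group/dmaapmr/svc.yaml | kubectl apply -f -

    # Docker: the compose file reads the same kind of variables from the
    # shell environment and attaches to the external network
    # ${DOCKER_SIM_NWNAME}, which must exist beforehand
    docker-compose -f test/simulator-group/dmaapmr/docker-compose.yaml up -d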

Issue-ID: NONRTRIC-356

Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: I942b37c05077b3ba753b3327455d6babed8f6061
diff --git a/test/simulator-group/dmaapmr/app.yaml b/test/simulator-group/dmaapmr/app.yaml
new file mode 100644
index 0000000..50e6943
--- /dev/null
+++ b/test/simulator-group/dmaapmr/app.yaml
@@ -0,0 +1,176 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $MR_DMAAP_KUBE_APP_NAME
+  namespace: $KUBE_ONAP_NAMESPACE
+  labels:
+    run: $MR_DMAAP_KUBE_APP_NAME
+    autotest: DMAAPMR
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      run: $MR_DMAAP_KUBE_APP_NAME
+  template:
+    metadata:
+      labels:
+        run: $MR_DMAAP_KUBE_APP_NAME
+        autotest: DMAAPMR
+    spec:
+      containers:
+      - name: $MR_DMAAP_KUBE_APP_NAME
+        image: $ONAP_DMAAPMR_IMAGE
+        imagePullPolicy: Never
+        ports:
+        - name: http
+          containerPort: $MR_INTERNAL_PORT
+        - name: https
+          containerPort: $MR_INTERNAL_SECURE_PORT
+        env:
+        - name: enableCadi
+          value: 'false'
+        volumeMounts:
+        - mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
+          subPath: MsgRtrApi.properties
+          name: dmaapmr-msg-rtr-api
+        - mountPath: /appl/dmaapMR1/bundleconfig/etc/logback.xml
+          subPath: logback.xml
+          name: dmaapmr-log-back
+        - mountPath: /appl/dmaapMR1/etc/cadi.properties
+          subPath: cadi.properties
+          name: dmaapmr-cadi
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: dmaapmr-msgrtrapi.properties
+        name: dmaapmr-msg-rtr-api
+      - configMap:
+          defaultMode: 420
+          name: dmaapmr-logback.xml
+        name: dmaapmr-log-back
+      - configMap:
+          defaultMode: 420
+          name: dmaapmr-cadi.properties
+        name: dmaapmr-cadi
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $MR_KAFKA_BWDS_NAME
+  namespace: $KUBE_ONAP_NAMESPACE
+  labels:
+    run: $MR_KAFKA_BWDS_NAME
+    autotest: DMAAPMR
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      run: $MR_KAFKA_BWDS_NAME
+  template:
+    metadata:
+      labels:
+        run: $MR_KAFKA_BWDS_NAME
+        autotest: DMAAPMR
+    spec:
+      containers:
+      - name: $MR_KAFKA_BWDS_NAME
+        image: $ONAP_KAFKA_IMAGE
+        imagePullPolicy: Never
+        ports:
+        - name: http
+          containerPort: 9092
+        env:
+        - name: enableCadi
+          value: 'false'
+        - name: KAFKA_ZOOKEEPER_CONNECT
+          value: 'zookeeper.onap:2181'
+        - name: KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS
+          value: '40000'
+        - name: KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS
+          value: '40000'
+        - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
+          value: 'INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT'
+#        - name: KAFKA_ADVERTISED_LISTENERS
+#          value: 'INTERNAL_PLAINTEXT://akfak-bwds.onap:9092'
+        - name: KAFKA_ADVERTISED_LISTENERS
+          value: 'INTERNAL_PLAINTEXT://localhost:9092'
+        - name: KAFKA_LISTENERS
+          value: 'INTERNAL_PLAINTEXT://0.0.0.0:9092'
+        - name: KAFKA_INTER_BROKER_LISTENER_NAME
+          value: INTERNAL_PLAINTEXT
+        - name: KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE
+          value: 'false'
+        - name: KAFKA_OPTS
+          value: '-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf'
+        - name: KAFKA_ZOOKEEPER_SET_ACL
+          value: 'true'
+        - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
+          value: '1'
+        - name: KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS
+          value: '1'
+        volumeMounts:
+        - mountPath: /etc/kafka/secrets/jaas/zk_client_jaas.conf
+          subPath: zk_client_jaas.conf
+          name: dmaapmr-zk-client-jaas
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: dmaapmr-zk-client-jaas.conf
+        name: dmaapmr-zk-client-jaas
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $MR_ZOOKEEPER_APP_NAME
+  namespace: $KUBE_ONAP_NAMESPACE
+  labels:
+    run: $MR_ZOOKEEPER_APP_NAME
+    autotest: DMAAPMR
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      run: $MR_ZOOKEEPER_APP_NAME
+  template:
+    metadata:
+      labels:
+        run: $MR_ZOOKEEPER_APP_NAME
+        autotest: DMAAPMR
+    spec:
+      containers:
+      - name: $MR_ZOOKEEPER_APP_NAME
+        image: $ONAP_ZOOKEEPER_IMAGE
+        imagePullPolicy: Never
+        ports:
+        - name: http
+          containerPort: 2181
+        env:
+        - name: ZOOKEEPER_REPLICAS
+          value: '1'
+        - name: ZOOKEEPER_TICK_TIME
+          value: '2000'
+        - name: ZOOKEEPER_SYNC_LIMIT
+          value: '5'
+        - name: ZOOKEEPER_INIT_LIMIT
+          value: '10'
+        - name: ZOOKEEPER_MAX_CLIENT_CNXNS
+          value: '200'
+        - name: ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT
+          value: '3'
+        - name: ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL
+          value: '24'
+        - name: ZOOKEEPER_CLIENT_PORT
+          value: '2181'
+        - name: KAFKA_OPTS
+          value: '-Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl'
+        - name: ZOOKEEPER_SERVER_ID
+          value: '1'
+        volumeMounts:
+        - mountPath: /etc/zookeeper/secrets/jaas/zk_server_jaas.conf
+          subPath: zk_server_jaas.conf
+          name: dmaapmr-zk-server-jaas
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: dmaapmr-zk-server-jaas.conf
+        name: dmaapmr-zk-server-jaas
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/docker-compose.yaml b/test/simulator-group/dmaapmr/docker-compose.yaml
new file mode 100644
index 0000000..b418028
--- /dev/null
+++ b/test/simulator-group/dmaapmr/docker-compose.yaml
@@ -0,0 +1,89 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+version: '3.5'
+networks:
+  default:
+    external:
+      name: ${DOCKER_SIM_NWNAME}
+
+services:
+  zookeeper:
+    image: $ONAP_ZOOKEEPER_IMAGE
+    container_name: $MR_ZOOKEEPER_APP_NAME
+    ports:
+      - "2181:2181"
+    environment:
+     ZOOKEEPER_REPLICAS: 1
+     ZOOKEEPER_TICK_TIME: 2000
+     ZOOKEEPER_SYNC_LIMIT: 5
+     ZOOKEEPER_INIT_LIMIT: 10
+     ZOOKEEPER_MAX_CLIENT_CNXNS: 200
+     ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT: 3
+     ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: 24
+     ZOOKEEPER_CLIENT_PORT: 2181
+     KAFKA_OPTS: -Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl
+     ZOOKEEPER_SERVER_ID: 1
+    volumes:
+      -  ./mnt/zk/zk_server_jaas.conf:/etc/zookeeper/secrets/jaas/zk_server_jaas.conf
+    networks:
+      - default
+
+  kafka:
+    image: $ONAP_KAFKA_IMAGE
+    container_name: $MR_KAFKA_APP_NAME
+    ports:
+      - "9092:9092"
+    environment:
+     enableCadi: 'false'
+     KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+     KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 40000
+     KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: 40000
+     KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT
+     KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://kafka:9092
+     KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:9092
+     KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL_PLAINTEXT
+     KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
+     KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf
+     KAFKA_ZOOKEEPER_SET_ACL: 'true'
+     KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+     # Reduced the number of partitions to avoid timeout errors on the first subscribe call in slow environments
+     KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: 1
+    volumes:
+      -  ./mnt/kafka/zk_client_jaas.conf:/etc/kafka/secrets/jaas/zk_client_jaas.conf
+    networks:
+      - default
+    depends_on:
+      - zookeeper
+
+  dmaap:
+    image: $ONAP_DMAAPMR_IMAGE
+    container_name: $MR_DMAAP_APP_NAME
+    ports:
+      - ${MR_DMAAP_LOCALHOST_PORT}:${MR_INTERNAL_PORT}
+      - ${MR_DMAAP_LOCALHOST_SECURE_PORT}:${MR_INTERNAL_SECURE_PORT}
+    environment:
+     enableCadi: 'false'
+    volumes:
+      - ./mnt/mr/MsgRtrApi.properties:/appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
+      - ./mnt/mr/logback.xml:/appl/dmaapMR1/bundleconfig/etc/logback.xml
+      - ./mnt/mr/cadi.properties:/appl/dmaapMR1/etc/cadi.properties
+    networks:
+      - default
+    depends_on:
+      - zookeeper
+      - kafka
diff --git a/test/simulator-group/dmaapmr/mnt/kafka/zk_client_jaas.conf b/test/simulator-group/dmaapmr/mnt/kafka/zk_client_jaas.conf
new file mode 100644
index 0000000..d4ef1eb
--- /dev/null
+++ b/test/simulator-group/dmaapmr/mnt/kafka/zk_client_jaas.conf
@@ -0,0 +1,5 @@
+Client {
+   org.apache.zookeeper.server.auth.DigestLoginModule required
+   username="kafka"
+   password="kafka_secret";
+ };
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties
new file mode 100644
index 0000000..66c7db1
--- /dev/null
+++ b/test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties
@@ -0,0 +1,173 @@
+# LICENSE_START=======================================================
+#  org.onap.dmaap
+#  ================================================================================
+#  Copyright © 2020 Nordix Foundation. All rights reserved.
+#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+#  ================================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=========================================================
+#
+#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+###############################################################################
+###############################################################################
+##
+## Cambria API Server config
+##
+## Default values are shown as commented settings.
+##
+###############################################################################
+##
+## HTTP service
+##
+## 3904 is standard as of 7/29/14.
+#
+## Zookeeper Connection
+##
+## Both Cambria and Kafka make use of Zookeeper.
+##
+#config.zk.servers=172.18.1.1
+#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
+config.zk.servers=zookeeper.onap:2181
+
+#config.zk.root=/fe3c/cambria/config
+
+
+###############################################################################
+##
+## Kafka Connection
+##
+##        Items below are passed through to Kafka's producer and consumer
+##        configurations (after removing "kafka.")
+##        if you want to change request.required.acks it can take this one value
+#kafka.metadata.broker.list=localhost:9092,localhost:9093
+#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
+kafka.metadata.broker.list=akfak-bwds.onap:9092
+##kafka.request.required.acks=-1
+#kafka.client.zookeeper=${config.zk.servers}
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=20000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes=1000000
+auto.commit.enable=false
+
+#(backoff*retries > zksessiontimeout)
+kafka.rebalance.backoff.ms=10000
+kafka.rebalance.max.retries=6
+
+
+###############################################################################
+##
+##        Secured Config
+##
+##        Some data stored in the config system is sensitive -- API keys and secrets,
+##        for example. To protect it, we use an encryption layer for this section
+##        of the config.
+##
+## The key is a base64 encoded AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16 byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
+#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+##        Kafka expects live connections from the consumer to the broker, which
+##        obviously doesn't work over connectionless HTTP requests. The Cambria
+##        server proxies HTTP requests into Kafka consumer sessions that are kept
+##        around for later re-use. Not doing so is costly for setup per request,
+##        which would substantially impact a high volume consumer's performance.
+##
+##        This complicates Cambria server failover, because we often need server
+##        A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+cambria.consumer.cache.touchFreqMs=120000
+##stickforallconsumerrequests=false
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=<use-something-unique-to-this-instance>
+
+#cambria.rateLimit.maxEmptyPollsPerMinute=30
+#cambria.rateLimitActual.delay.ms=10
+
+###############################################################################
+##
+## Metrics Reporting
+##
+##        This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics                                  #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+consumer.timeout=17
+default.partitions=3
+default.replicas=3
+##############################################################################
+#100mb
+maxcontentlength=10000
+
+
+##############################################################################
+#AAF Properties
+msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
+msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+enforced.topic.name.AAF=org.onap.dmaap.mr
+forceAAF=false
+transidUEBtopicreqd=false
+defaultNSforUEB=org.onap.dmaap.mr
+##############################################################################
+#Mirror Maker Agent
+
+msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
+msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
+msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+msgRtr.mirrormaker.timeout=15000
+msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
+msgRtr.mirrormaker.consumergroup=mmagentserver
+msgRtr.mirrormaker.consumerid=1
+
+kafka.max.poll.interval.ms=300000
+kafka.heartbeat.interval.ms=60000
+kafka.session.timeout.ms=240000
+kafka.max.poll.records=1000
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/mr/MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt/mr/MsgRtrApi.properties
new file mode 100644
index 0000000..573a81a
--- /dev/null
+++ b/test/simulator-group/dmaapmr/mnt/mr/MsgRtrApi.properties
@@ -0,0 +1,173 @@
+# LICENSE_START=======================================================
+#  org.onap.dmaap
+#  ================================================================================
+#  Copyright © 2020 Nordix Foundation. All rights reserved.
+#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+#  ================================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=========================================================
+#
+#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+###############################################################################
+###############################################################################
+##
+## Cambria API Server config
+##
+## Default values are shown as commented settings.
+##
+###############################################################################
+##
+## HTTP service
+##
+## 3904 is standard as of 7/29/14.
+#
+## Zookeeper Connection
+##
+## Both Cambria and Kafka make use of Zookeeper.
+##
+#config.zk.servers=172.18.1.1
+#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
+config.zk.servers=zookeeper:2181
+
+#config.zk.root=/fe3c/cambria/config
+
+
+###############################################################################
+##
+## Kafka Connection
+##
+##        Items below are passed through to Kafka's producer and consumer
+##        configurations (after removing "kafka.")
+##        if you want to change request.required.acks it can take this one value
+#kafka.metadata.broker.list=localhost:9092,localhost:9093
+#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
+kafka.metadata.broker.list=kafka:9092
+##kafka.request.required.acks=-1
+#kafka.client.zookeeper=${config.zk.servers}
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=20000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes=1000000
+auto.commit.enable=false
+
+#(backoff*retries > zksessiontimeout)
+kafka.rebalance.backoff.ms=10000
+kafka.rebalance.max.retries=6
+
+
+###############################################################################
+##
+##        Secured Config
+##
+##        Some data stored in the config system is sensitive -- API keys and secrets,
+##        for example. To protect it, we use an encryption layer for this section
+##        of the config.
+##
+## The key is a base64 encoded AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16 byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
+#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+##        Kafka expects live connections from the consumer to the broker, which
+##        obviously doesn't work over connectionless HTTP requests. The Cambria
+##        server proxies HTTP requests into Kafka consumer sessions that are kept
+##        around for later re-use. Not doing so is costly for setup per request,
+##        which would substantially impact a high volume consumer's performance.
+##
+##        This complicates Cambria server failover, because we often need server
+##        A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+cambria.consumer.cache.touchFreqMs=120000
+##stickforallconsumerrequests=false
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=<use-something-unique-to-this-instance>
+
+#cambria.rateLimit.maxEmptyPollsPerMinute=30
+#cambria.rateLimitActual.delay.ms=10
+
+###############################################################################
+##
+## Metrics Reporting
+##
+##        This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics                                  #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+consumer.timeout=17
+default.partitions=3
+default.replicas=3
+##############################################################################
+#100mb
+maxcontentlength=10000
+
+
+##############################################################################
+#AAF Properties
+msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
+msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+enforced.topic.name.AAF=org.onap.dmaap.mr
+forceAAF=false
+transidUEBtopicreqd=false
+defaultNSforUEB=org.onap.dmaap.mr
+##############################################################################
+#Mirror Maker Agent
+
+msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
+msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
+msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+msgRtr.mirrormaker.timeout=15000
+msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
+msgRtr.mirrormaker.consumergroup=mmagentserver
+msgRtr.mirrormaker.consumerid=1
+
+kafka.max.poll.interval.ms=300000
+kafka.heartbeat.interval.ms=60000
+kafka.session.timeout.ms=240000
+kafka.max.poll.records=1000
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/mr/cadi.properties b/test/simulator-group/dmaapmr/mnt/mr/cadi.properties
new file mode 100644
index 0000000..dca56c8
--- /dev/null
+++ b/test/simulator-group/dmaapmr/mnt/mr/cadi.properties
@@ -0,0 +1,19 @@
+aaf_locate_url=https://aaf-locate.{{ include "common.namespace" . }}:8095
+aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
+aaf_env=DEV
+aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
+
+cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+cadi_truststore_password=enc:mN6GiIzFQxKGDzAXDOs7b4j8DdIX02QrZ9QOWNRpxV3rD6whPCfizSMZkJwxi_FJ
+
+cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
+
+cadi_alias=dmaapmr@mr.dmaap.onap.org
+cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
+cadi_keystore_password=enc:_JJT2gAEkRzXla5xfDIHal8pIoIB5iIos3USvZQT6sL-l14LpI5fRFR_QIGUCh5W
+cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
+
+cadi_loglevel=INFO
+cadi_protocols=TLSv1.1,TLSv1.2
+cadi_latitude=37.78187
+cadi_longitude=-122.26147
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/mr/logback.xml b/test/simulator-group/dmaapmr/mnt/mr/logback.xml
new file mode 100644
index 0000000..8471208
--- /dev/null
+++ b/test/simulator-group/dmaapmr/mnt/mr/logback.xml
@@ -0,0 +1,206 @@
+<!--
+     ============LICENSE_START=======================================================
+     Copyright © 2020 Nordix Foundation. All rights reserved.
+     Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+     ================================================================================
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+           http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+     ============LICENSE_END=========================================================
+ -->
+
+<configuration scan="true" scanPeriod="3 seconds" debug="false">
+  <contextName>${module.ajsc.namespace.name}</contextName>
+  <jmxConfigurator />
+  <property name="logDirectory" value="${AJSC_HOME}/log" />
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.LevelFilter">
+      <level>ERROR</level>
+      <onMatch>ACCEPT</onMatch>
+      <onMismatch>DENY</onMismatch>
+    </filter>
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n
+      </pattern>
+    </encoder>
+  </appender>
+
+  <appender name="INFO" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.LevelFilter">
+      <level>INFO</level>
+      <onMatch>ACCEPT</onMatch>
+      <onMismatch>DENY</onMismatch>
+    </filter>
+  </appender>
+
+  <appender name="DEBUG" class="ch.qos.logback.core.ConsoleAppender">
+
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+
+  <appender name="ERROR" class="ch.qos.logback.core.ConsoleAppender"> class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.LevelFilter">
+      <level>ERROR</level>
+      <onMatch>ACCEPT</onMatch>
+      <onMismatch>DENY</onMismatch>
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+
+
+  <!-- Msgrtr related loggers -->
+  <logger name="org.onap.dmaap.dmf.mr.service" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.service.impl" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.resources" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.resources.streamReaders" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.backends" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.backends.kafka" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.backends.memory" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.beans" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.constants" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.exception" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.listener" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metrics.publisher" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.metrics.publisher.impl" level="INFO" />
+
+
+
+  <logger name="org.onap.dmaap.dmf.mr.security" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.security.impl" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.transaction" level="INFO" />
+  <logger name="com.att.dmf.mr.transaction.impl" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.utils" level="INFO" />
+  <logger name="org.onap.dmaap.mr.filter" level="INFO" />
+
+  <!--<logger name="com.att.nsa.cambria.*" level="INFO" />-->
+
+  <!-- Msgrtr loggers in ajsc -->
+  <logger name="org.onap.dmaap.service" level="INFO" />
+  <logger name="org.onap.dmaap" level="INFO" />
+
+
+  <!-- Spring related loggers -->
+  <logger name="org.springframework" level="WARN" additivity="false"/>
+  <logger name="org.springframework.beans" level="WARN" additivity="false"/>
+  <logger name="org.springframework.web" level="WARN" additivity="false" />
+  <logger name="com.blog.spring.jms" level="WARN" additivity="false" />
+
+  <!-- AJSC Services (bootstrap services) -->
+  <logger name="ajsc" level="WARN" additivity="false"/>
+  <logger name="ajsc.RouteMgmtService" level="INFO" additivity="false"/>
+  <logger name="ajsc.ComputeService" level="INFO" additivity="false" />
+  <logger name="ajsc.VandelayService" level="WARN" additivity="false"/>
+  <logger name="ajsc.FilePersistenceService" level="WARN" additivity="false"/>
+  <logger name="ajsc.UserDefinedJarService" level="WARN" additivity="false" />
+  <logger name="ajsc.UserDefinedBeansDefService" level="WARN" additivity="false" />
+  <logger name="ajsc.LoggingConfigurationService" level="WARN" additivity="false" />
+
+  <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+    logging) -->
+  <logger name="ajsc.utils" level="WARN" additivity="false"/>
+  <logger name="ajsc.utils.DME2Helper" level="INFO" additivity="false" />
+  <logger name="ajsc.filters" level="DEBUG" additivity="false" />
+  <logger name="ajsc.beans.interceptors" level="DEBUG" additivity="false" />
+  <logger name="ajsc.restlet" level="DEBUG" additivity="false" />
+  <logger name="ajsc.servlet" level="DEBUG" additivity="false" />
+  <logger name="com.att" level="WARN" additivity="false" />
+  <logger name="com.att.ajsc.csi.logging" level="WARN" additivity="false" />
+  <logger name="com.att.ajsc.filemonitor" level="WARN" additivity="false"/>
+
+  <logger name="com.att.nsa.dmaap.util" level="INFO" additivity="false"/>
+  <logger name="com.att.cadi.filter" level="INFO" additivity="false" />
+
+
+  <!-- Other Loggers that may help troubleshoot -->
+  <logger name="net.sf" level="WARN" additivity="false" />
+  <logger name="org.apache.commons.httpclient" level="WARN" additivity="false"/>
+  <logger name="org.apache.commons" level="WARN" additivity="false" />
+  <logger name="org.apache.coyote" level="WARN" additivity="false"/>
+  <logger name="org.apache.jasper" level="WARN" additivity="false"/>
+
+  <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+    May aid in troubleshooting) -->
+  <logger name="org.apache.camel" level="WARN" additivity="false" />
+  <logger name="org.apache.cxf" level="WARN" additivity="false" />
+  <logger name="org.apache.camel.processor.interceptor" level="WARN" additivity="false"/>
+  <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" additivity="false" />
+  <logger name="org.apache.cxf.service" level="WARN" additivity="false" />
+  <logger name="org.restlet" level="DEBUG" additivity="false" />
+  <logger name="org.apache.camel.component.restlet" level="DEBUG" additivity="false" />
+  <logger name="org.apache.kafka" level="DEBUG" additivity="false" />
+  <logger name="org.apache.zookeeper" level="INFO" additivity="false" />
+  <logger name="org.I0Itec.zkclient" level="DEBUG" additivity="false" />
+
+  <!-- logback internals logging -->
+  <logger name="ch.qos.logback.classic" level="INFO" additivity="false"/>
+  <logger name="ch.qos.logback.core" level="INFO" additivity="false" />
+
+  <!-- logback jms appenders & loggers definition starts here -->
+  <appender name="auditLogs" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+  <appender name="perfLogs" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+  <appender name="ASYNC-audit" class="ch.qos.logback.classic.AsyncAppender">
+    <queueSize>1000</queueSize>
+    <discardingThreshold>0</discardingThreshold>
+    <appender-ref ref="Audit-Record-Queue" />
+  </appender>
+
+  <logger name="AuditRecord" level="INFO" additivity="FALSE">
+    <appender-ref ref="STDOUT" />
+  </logger>
+  <logger name="AuditRecord_DirectCall" level="INFO" additivity="FALSE">
+    <appender-ref ref="STDOUT" />
+  </logger>
+  <appender name="ASYNC-perf" class="ch.qos.logback.classic.AsyncAppender">
+    <queueSize>1000</queueSize>
+    <discardingThreshold>0</discardingThreshold>
+    <appender-ref ref="Performance-Tracker-Queue" />
+  </appender>
+  <logger name="PerfTrackerRecord" level="INFO" additivity="FALSE">
+    <appender-ref ref="ASYNC-perf" />
+    <appender-ref ref="perfLogs" />
+  </logger>
+  <!-- logback jms appenders & loggers definition ends here -->
+
+  <root level="DEBUG">
+    <appender-ref ref="DEBUG" />
+    <appender-ref ref="ERROR" />
+    <appender-ref ref="INFO" />
+    <appender-ref ref="STDOUT" />
+  </root>
+
+</configuration>
diff --git a/test/simulator-group/dmaapmr/mnt/zk/zk_server_jaas.conf b/test/simulator-group/dmaapmr/mnt/zk/zk_server_jaas.conf
new file mode 100644
index 0000000..26bf460
--- /dev/null
+++ b/test/simulator-group/dmaapmr/mnt/zk/zk_server_jaas.conf
@@ -0,0 +1,4 @@
+Server {
+       org.apache.zookeeper.server.auth.DigestLoginModule required
+       user_kafka=kafka_secret;
+};
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/svc.yaml b/test/simulator-group/dmaapmr/svc.yaml
new file mode 100644
index 0000000..0a02b4f
--- /dev/null
+++ b/test/simulator-group/dmaapmr/svc.yaml
@@ -0,0 +1,57 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: $MR_DMAAP_KUBE_APP_NAME
+  namespace: $KUBE_ONAP_NAMESPACE
+  labels:
+    run: $MR_DMAAP_KUBE_APP_NAME
+    autotest: DMAAPMR
+spec:
+  type: ClusterIP
+  ports:
+  - port: $MR_EXTERNAL_PORT
+    targetPort: $MR_INTERNAL_PORT
+    protocol: TCP
+    name: http
+  - port: $MR_EXTERNAL_SECURE_PORT
+    targetPort: $MR_INTERNAL_SECURE_PORT
+    protocol: TCP
+    name: https
+  selector:
+    run: $MR_DMAAP_KUBE_APP_NAME
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: $MR_KAFKA_BWDS_NAME
+  namespace: $KUBE_ONAP_NAMESPACE
+  labels:
+    run: $MR_KAFKA_BWDS_NAME
+    autotest: DMAAPMR
+spec:
+  type: ClusterIP
+  ports:
+  - port: 9092
+    targetPort: 9092
+    protocol: TCP
+    name: http
+  selector:
+    run: $MR_KAFKA_BWDS_NAME
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: $MR_ZOOKEEPER_APP_NAME
+  namespace: $KUBE_ONAP_NAMESPACE
+  labels:
+    run: $MR_ZOOKEEPER_APP_NAME
+    autotest: DMAAPMR
+spec:
+  type: ClusterIP
+  ports:
+  - port: 2181
+    targetPort: 2181
+    protocol: TCP
+    name: http
+  selector:
+    run: $MR_ZOOKEEPER_APP_NAME
\ No newline at end of file