Merge "Add INFO.yaml file"
diff --git a/bootstrap/installer-docker.sh-template b/bootstrap/installer-docker.sh-template
index 5957381..21ccabb 100755
--- a/bootstrap/installer-docker.sh-template
+++ b/bootstrap/installer-docker.sh-template
@@ -39,6 +39,7 @@
 TCABP=tca.yaml
 HRULESBP=holmes-rules.yaml
 HENGINEBP=holmes-engine.yaml
+PRHBP=prh.yaml
 
 DOCKERBPURL="${PLATBPSRC}/${DOCKERBP}"
 CBSBPURL="${PLATBPSRC}/${CBSBP}"
@@ -52,6 +53,7 @@
 TCABPURL="${PLATBPSRC}/${TCABP}"
 HRULESBPURL="${PLATBPSRC}/${HRULESBP}"
 HENGINEBPURL="${PLATBPSRC}/${HENGINEBP}"
+PRHBPURL="${PLATBPSRC}/${PRHBP}"
 
 LOCATIONID=$(printenv LOCATION)
 
@@ -369,6 +371,7 @@
 wget -P ./blueprints/tca/ ${TCABPURL}
 wget -P ./blueprints/hrules/ ${HRULESBPURL}
 wget -P ./blueprints/hengine/ ${HENGINEBPURL}
+wget -P ./blueprints/prh/ ${PRHBPURL}
 
 
 # Set up the credentials for access to the Docker registry
@@ -444,6 +447,8 @@
 cfy install -p ./blueprints/hrules/${HRULESBP} -b hrules -d hrules -i ../config/hr-ip.yaml
 cfy install -p ./blueprints/hengine/${HENGINEBP} -b hengine -d hengine -i ../config/he-ip.yaml
 
+# PRH
+cfy install -p ./blueprints/prh/${PRHBP} -b prh -d prh -i ../config/prhinput.yaml
 
 # write out IP addresses
 echo "$CONSULIP" > "$STARTDIR"/config/runtime.ip.consul
diff --git a/bootstrap/pom.xml b/bootstrap/pom.xml
index 9f1a043..d2965e9 100644
--- a/bootstrap/pom.xml
+++ b/bootstrap/pom.xml
@@ -28,7 +28,7 @@
   <groupId>org.onap.dcaegen2.deployments</groupId>
   <artifactId>bootstrap</artifactId>
   <name>dcaegen2-deployments-bootstrap</name>
-  <version>1.1.2-SNAPSHOT</version>
+  <version>1.2.0-SNAPSHOT</version>
   <url>http://maven.apache.org</url>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
diff --git a/cm-container/test-expand.sh b/cm-container/test-expand.sh
index 0d5e8e5..7faee6c 100755
--- a/cm-container/test-expand.sh
+++ b/cm-container/test-expand.sh
@@ -1,2 +1,2 @@
 #!/bin/bash
-sed -e 's#{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases }}#https://nexus.onap.org/content/sites/raw/org.onap.dcaegen2.platform.plugins/R2#g' Dockerfile-template > Dockerfile
+sed -e 's#{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases }}#https://nexus.onap.org/content/sites/raw/org.onap.dcaegen2.platform.plugins/R3#g' Dockerfile-template > Dockerfile
diff --git a/healthcheck-container/get-status.js b/healthcheck-container/get-status.js
index 2ed1d3d..034ff9d 100644
--- a/healthcheck-container/get-status.js
+++ b/healthcheck-container/get-status.js
@@ -96,6 +96,30 @@
 	});
 };
 
+const getStatusSinglePromise = function (item) {
+	// Expect item to be of the form {namespace: "namespace", deployment: "deployment_name"}
+	return new Promise(function(resolve, reject){
+		const path = K8S_PATH + item.namespace + '/deployments/' + item.deployment;
+		queryKubernetes(path, function(error, res, body){
+			if (error) {
+				reject(error);
+			}
+			else if (res.statusCode === 404) {
+				// Treat absent deployment as if it's an unhealthy deployment
+				resolve ({
+					metadata: {name: item.deployment},
+					status: {unavailableReplicas: 1}
+				});
+			}
+			else if (res.statusCode != 200) {
+				reject(body);
+			}
+			else {
+				resolve(body);
+			}
+		});
+	});
+}
 exports.getStatusNamespace = function (namespace, callback) {
 	// Get readiness information for all deployments in namespace
 	const path = K8S_PATH + namespace + '/deployments';
@@ -106,4 +130,12 @@
 	// Get readiness information for a single deployment
 	const path = K8S_PATH + namespace + '/deployments/' + deployment;
 	getStatus(path, summarizeDeployment, callback);
-};
\ No newline at end of file
+};
+
+exports.getStatusListPromise = function (list) {
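+	// e.g. getStatusListPromise([{namespace: "onap", deployment: "dep-inventory"}]) resolves to the
+	// summary object produced by summarizeDeploymentList (the namespace/deployment values are illustrative)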
+	// List is of the form [{namespace: "namespace", deployment: "deployment_name"}, ... ]
+	const p = Promise.all(list.map(getStatusSinglePromise))
+	return p.then(function(results) {
+	    return summarizeDeploymentList({items: results});
+	});
+}
\ No newline at end of file
diff --git a/healthcheck-container/healthcheck.js b/healthcheck-container/healthcheck.js
index 7555032..ca1df84 100644
--- a/healthcheck-container/healthcheck.js
+++ b/healthcheck-container/healthcheck.js
@@ -17,16 +17,45 @@
 //Expect ONAP and DCAE namespaces and Helm "release" name to be passed via environment variables
 // 
 const ONAP_NS = process.env.ONAP_NAMESPACE || 'default';
-const DCAE_NS = process.env.DCAE_NAMESPACE || 'default';
+const DCAE_NS = process.env.DCAE_NAMESPACE || process.env.ONAP_NAMESPACE || 'default';
 const HELM_REL = process.env.HELM_RELEASE || '';
 
 const HEALTHY = 200;
 const UNHEALTHY = 500;
 const UNKNOWN = 503;
 
+// List of deployments expected to be created via Helm
+const helmDeps = 
+	[
+		'dcae-cloudify-manager'
+	];
+
+// List of deployments expected to be created via Cloudify Manager
+const dcaeDeps  = 
+	[
+		'dep-config-binding-service',
+		'dep-deployment-handler',
+		'dep-inventory',
+		'dep-service-change-handler',
+		'dep-policy-handler',
+		'dep-dcae-ves-collector',
+		'dep-dcae-tca-analytics'
+	];
+
 const status = require('./get-status');
 const http = require('http');
 
+// Helm deployments are always in the ONAP namespace and prefixed by Helm release name
+const helmList = helmDeps.map(function(name) {
+	return {namespace: ONAP_NS, deployment: HELM_REL.length > 0 ? HELM_REL + '-' + name : name};
+});
+
+// DCAE deployments via CM don't have a release prefix and are in the DCAE namespace,
+// which can be the same as the ONAP namespace
+const dcaeList = dcaeDeps.map(function(name) {
+	return {namespace: DCAE_NS, deployment: name};
+});
+
 const isHealthy = function(summary) {
 	// Current healthiness criterion is simple--all deployments are ready
 	return summary.count && summary.ready && summary.count === summary.ready;
@@ -38,33 +67,13 @@
 	// If we get responses from k8s but don't find all deployments ready, health status is UNHEALTHY (503)
 	// If we get responses from k8s and all deployments are ready, health status is HEALTHY (200)
 	// This could be a lot more nuanced, but what's here should be sufficient for R2 OOM healthchecking
-	status.getStatusNamespace(DCAE_NS, function(err, res, body) {
-		let ret = {status : UNKNOWN, body: [body]};
-		if (err) {
-			callback(ret);
-		}
-		else if (body.type && body.type === 'summary') {
-			if (isHealthy(body)) {
-				// All the DCAE components report healthy -- check Cloudify Manager
-				let cmDeployment = 'dcae-cloudify-manager';
-				if (HELM_REL.length > 0) {
-					cmDeployment = HELM_REL + '-' + cmDeployment;
-				}
-				status.getStatusSingle(ONAP_NS, cmDeployment, function (err, res, body){
-					ret.body.push(body);
-					if (err) {
-						callback(ret);
-					}
-					if (body.type && body.type === 'summary') {
-						ret.status = isHealthy(body) ? HEALTHY : UNHEALTHY;
-					}
-					callback(ret);
-				});
-			}
-			else {
-				callback(ret);
-			}
-		}
+	
+	status.getStatusListPromise(helmList.concat(dcaeList))
+	.then(function(body) {
+		callback({status: isHealthy(body) ? HEALTHY : UNHEALTHY, body: body});
+	})
+	.catch(function(error){
+		callback({status: UNKNOWN, body: [error]})
 	});
 };
 
diff --git a/healthcheck-container/package.json b/healthcheck-container/package.json
index 2a08bdd..c4b7560 100644
--- a/healthcheck-container/package.json
+++ b/healthcheck-container/package.json
@@ -1,7 +1,7 @@
 {
   "name": "k8s-healthcheck",
   "description": "DCAE healthcheck server",
-  "version": "1.0.0",
+  "version": "1.1.0",
   "main": "healthcheck.js",
   "dependencies": {
     "request": "2.85.0"
diff --git a/healthcheck-container/pom.xml b/healthcheck-container/pom.xml
index dea3c48..415c1cb 100644
--- a/healthcheck-container/pom.xml
+++ b/healthcheck-container/pom.xml
@@ -27,7 +27,7 @@
   <groupId>org.onap.dcaegen2.deployments</groupId>
   <artifactId>healthcheck-container</artifactId>
   <name>dcaegen2-deployments-healthcheck-container</name>
-  <version>1.0.0</version>
+  <version>1.1.0</version>
   <url>http://maven.apache.org</url>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
diff --git a/heat/docker-compose-1.yaml b/heat/docker-compose-1.yaml
index 82095ad..b8a04d1 100644
--- a/heat/docker-compose-1.yaml
+++ b/heat/docker-compose-1.yaml
@@ -3,6 +3,7 @@
    pgHolmes:
       image: "postgres:9.5"
       container_name: "pgHolmes"
+      restart: "always"
       hostname: "phHolmes"
       environment:
        - "POSTGRES_USER=holmes"
@@ -18,6 +19,7 @@
    pgInventory:
       image: "postgres:9.5"
       container_name: "pgInventory"
+      restart: "always"
       hostname: "pgInventory"
       environment:
        - "POSTGRES_USER=inventory"
@@ -32,8 +34,9 @@
 
 
    consul:
-      image: consul:0.8.3
+      image: "consul:0.8.3"
       container_name: "consul"
+      restart: "always"
       hostname: "consul"
       ports:
        - "8500:8500"
@@ -50,6 +53,7 @@
    config-binding-service:
       image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.platform.configbinding:{{ dcae_docker_cbs }}"
       container_name: "config_binding_service"
+      restart: "always"
       hostname: "config-binding-service"
       environment:
        - "CONSUL_HOST=consul"
diff --git a/heat/docker-compose-2.yaml b/heat/docker-compose-2.yaml
index 4fd9d5a..1666caf 100644
--- a/heat/docker-compose-2.yaml
+++ b/heat/docker-compose-2.yaml
@@ -4,6 +4,7 @@
    mvp-dcaegen2-collectors-ves:
       image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.collectors.ves.vescollector:{{ dcae_docker_ves }}"
       container_name: "mvp-dcaegen2-collectors-ves"
+      restart: "always"
       hostname: "mvp-dcaegen2-collectors-ves"
       environment:
        - "DMAAPHOST={{ mr_ip_addr }}"
@@ -24,12 +25,13 @@
    mvp-dcaegen2-analytics-tca:
       image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:{{ dcae_docker_tca }}"
       container_name: "mvp-dcaegen2-analytics-tca"
+      restart: "always"
       hostname: "mvp-dcaegen2-analytics-tca"
       environment:
        - "DMAAPHOST={{ mr_ip_addr }}"
        - "DMAAPPORT=3904"
        - "DMAAPPUBTOPIC=unauthenticated.DCAE_CL_OUTPUT"
-       - "DMAAPSUBTOPIC=unauthenticated.SEC_MEASUREMENT_OUTPUT"
+       - "DMAAPSUBTOPIC=unauthenticated.VES_MEASUREMENT_OUTPUT"
        - "AAIHOST={{ aai1_ip_addr }}"
        - "AAIPORT=8443"
        - "CONSUL_HOST=consul"
@@ -50,10 +52,11 @@
        - "SERVICE_11011_CHECK_INTERVAL=15s"
        - "SERVICE_11011_CHECK_INITIAL_STATUS=passing"
 
-   mvp-dcae-analytics-holmes-engine-management:
+   mvp-dcaegen2-analytics-holmes-engine-management:
       image: "{{ nexus_docker_repo}}/onap/holmes/engine-management:{{ holmes_docker_em }}"
-      container_name: "mvp-dcae-analytics-holmes-engine-management"
-      hostname: "mvp-dcae-analytics-holmes-engine-management"
+      container_name: "mvp-dcaegen2-analytics-holmes-engine-management"
+      restart: "always"
+      hostname: "mvp-dcaegen2-analytics-holmes-engine-management"
       environment:
        - "URL_JDBC=pgHolmes:5432"
        - "JDBC_USERNAME=holmes"
@@ -62,16 +65,17 @@
        - "CONSUL_HOST=consul"
        - "CONSUL_PORT=8500"
        - "CONFIG_BINDING_SERVICE=config_binding_service"
-       - "HOSTNAME=mvp-dcae-analytics-holmes-engine-management"
+       - "HOSTNAME=mvp-dcaegen2-analytics-holmes-engine-management"
       ports:
        - "9102:9102"
       labels:
        - "SERVICE_9102_IGNORE=true"
 
-   mvp-dcae-analytics-holmes-rule-management:
+   mvp-dcaegen2-analytics-holmes-rule-management:
       image: "{{ nexus_docker_repo}}/onap/holmes/rule-management:{{ holmes_docker_rm }}"
-      container_name: "mvp-dcae-analytics-holmes-rule-management"
-      hostname: "mvp-dcae-analytics-holmes-rule-management"
+      container_name: "mvp-dcaegen2-analytics-holmes-rule-management"
+      restart: "always"
+      hostname: "mvp-dcaegen2-analytics-holmes-rule-management"
       environment:
        - "URL_JDBC=pgHolmes:5432"
        - "JDBC_USERNAME=holmes"
@@ -80,7 +84,7 @@
        - "CONSUL_HOST=consul"
        - "CONSUL_PORT=8500"
        - "CONFIG_BINDING_SERVICE=config_binding_service"
-       - "HOSTNAME=mvp-dcae-analytics-holmes-rule-management"
+       - "HOSTNAME=mvp-dcaegen2-analytics-holmes-rule-management"
       ports:
        - "9101:9101"
       labels:
diff --git a/heat/docker-compose-3.yaml b/heat/docker-compose-3.yaml
index f6c9212..3eef2bc 100644
--- a/heat/docker-compose-3.yaml
+++ b/heat/docker-compose-3.yaml
@@ -3,7 +3,7 @@
 
    inventory:
       image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.inventory-api:{{ dcae_docker_inv }}"
-      restart: always
+      restart: "always"
       container_name: "inventory"
       hostname: "inventory"
       environment:
@@ -21,6 +21,7 @@
    service-change-handler:
       image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.servicechange-handler:{{ dcae_docker_sch }}"
       container_name: "service-change-handler"
+      restart: "always"
       hostname: "service-change-handler"
       ports:
        - "8079:8079"
@@ -36,8 +37,8 @@
 
    deployment_handler:
       image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.deployment-handler:{{ dcae_docker_dh }}"
-      restart: always
       container_name: "deployment-handler"
+      restart: "always"
       hostname: "deployment-handler"
       environment:
        - "CLOUDIFY_PASSWORD=admin"
@@ -53,8 +54,8 @@
 
    policy_handler:
       image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.policy-handler:{{ dcae_docker_ph }}"
-      restart: always
       container_name: "policy-handler"
+      restart: "always"
       hostname: "policy-handler"
       ports:
        - "25577:25577"
diff --git a/heat/docker-compose-4.yaml b/heat/docker-compose-4.yaml
index c8c36ca..2aa0a3e 100644
--- a/heat/docker-compose-4.yaml
+++ b/heat/docker-compose-4.yaml
@@ -1,49 +1,10 @@
 version: '2.1'
 services:
-   heartbeat:
-      image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.platform.heartbeat:{{ dcae_docker_heartbeat }}"
-      container_name: static-dcaegen2-services-heartbeat
-      hostname: static-dcaegen2-services-heartbeat
-      environment:
-       - "DMAAPHOST={{ mr_ip_addr }}"
-       - "CONSUL_HOST=consul"
-       - "CONSUL_PORT=8500"
-       - "CONFIG_BINDING_SERVICE=config_binding_service"
-       - "SERVICE_NAME=static-dcaegen2-services-heartbeat"
-       - "HOSTNAME=static-dcaegen2-services-heartbeat"
-      ports:
-       - "10001:10000"
-      labels:
-       - "SERVICE_NAME=static-dcaegen2-services-heartbeat"
-       - "SERVICE_CHECK_DOCKER_SCRIPT=/app/bin/check_health.py"
-       - "SERVICE_CHECK_INTERVAL=15s"
-       - "SERVICE_CHECK_INITIAL_STATUS=passing"
-
-
-   prh:
-      image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.services.prh.prh-app-server:{{ dcae_docker_prh }}"
-      container_name: prh
-      hostname: prh
-      environment:
-       - "DMAAPHOST={{ mr_ip_addr }}"
-       - "CONSUL_HOST=consul"
-       - "CONSUL_PORT=8500"
-       - "CONFIG_BINDING_SERVICE=config_binding_service"
-       - "SERVICE_NAME=static-dcaegen2-services-prh"
-       - "HOSTNAME=static-dcaegen2-services-prh"
-      ports:
-       - "8100:8100"
-      labels:
-       - "SERVICE_8100_NAME=static-dcaegen2-services-prh"
-       - "SERVICE_8100_CHECK_TCP=true"
-       - "SERVICE_8100_CHECK_INTERVAL=15s"
-       - "SERVICE_8100_CHECK_INITIAL_STATUS=passing"
-
-
    snmptrap:
       image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.collectors.snmptrap:{{ dcae_docker_snmptrap }}"
-      container_name: static-dcaegen2-collectors-snmptrap
-      hostname: static-dcaegen2-collectors-snmptrap
+      container_name: "static-dcaegen2-collectors-snmptrap"
+      restart: "always"
+      hostname: "static-dcaegen2-collectors-snmptrap"
       environment:
        - "DMAAPHOST={{ mr_ip_addr }}"
        - "CONSUL_HOST=consul"
@@ -51,8 +12,9 @@
        - "CONFIG_BINDING_SERVICE=config_binding_service"
        - "SERVICE_NAME=static-dcaegen2-collectors-snmptrap"
        - "HOSTNAME=static-dcaegen2-collectors-snmptrap"
+       - "HOSTALIASES=/etc/host.aliases"
       ports:
-       - "162:162/udp"
+       - "162:6162/udp"
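+       # host port 162/udp now maps to container port 6162, matching protocols.ipv4_port in heat/register.sh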
       labels:
        - "SERVICE_NAME=static-dcaegen2-collectors-snmptrap"
        - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/snmptrap/bin/healthcheck.sh"
diff --git a/heat/register.sh b/heat/register.sh
index d13b925..5b0b9f3 100755
--- a/heat/register.sh
+++ b/heat/register.sh
@@ -33,10 +33,20 @@
 SRVCNAME_MVP_VES="mvp-dcaegen2-collectors-ves"
 HOSTNAME_MVP_TCA="mvp-dcaegen2-analytics-tca"
 SRVCNAME_MVP_TCA="mvp-dcaegen2-analytics-tca"
-HOSTNAME_MVP_HR="mvp-dcae-analytics-holmes-rule-management"
-SRVCNAME_MVP_HR="mvp-dcae-analytics-holmes-rule-management"
-HOSTNAME_MVP_HE="mvp-dcae-analytics-holmes-engine-management"
-SRVCNAME_MVP_HE="mvp-dcae-analytics-holmes-engine-management"
+HOSTNAME_MVP_HR="mvp-dcaegen2-analytics-holmes-rule-management"
+SRVCNAME_MVP_HR="mvp-dcaegen2-analytics-holmes-rule-management"
+HOSTNAME_MVP_HE="mvp-dcaegen2-analytics-holmes-engine-management"
+SRVCNAME_MVP_HE="mvp-dcaegen2-analytics-holmes-engine-management"
+
+# R2 PLUS service components
+HOSTNAME_STATIC_SNMPTRAP="static-dcaegen2-collectors-snmptrap"
+SRVCNAME_STATIC_SNMPTRAP="static-dcaegen2-collectors-snmptrap"
+HOSTNAME_STATIC_MAPPER="static-dcaegen2-services-mapper"
+SRVCNAME_STATIC_MAPPER="static-dcaegen2-services-mapper"
+HOSTNAME_STATIC_HEARTBEAT="static-dcaegen2-services-heartbeat"
+SRVCNAME_STATIC_HEARTBEAT="static-dcaegen2-services-heartbeat"
+HOSTNAME_STATIC_PRH="static-dcaegen2-services-prh"
+SRVCNAME_STATIC_PRH="static-dcaegen2-services-prh"
 
 
 # registering docker host
@@ -239,6 +249,7 @@
 # deployment handler
 REGKV='
 {
+  "logLevel": "DEBUG",
   "cloudify": {
     "protocol": "http"
   },
@@ -260,14 +271,14 @@
   "collector.schema.checkflag": "1",
   "collector.dmaap.streamid": "fault=ves_fault|syslog=ves_syslog|heartbeat=ves_heartbeat|measurementsForVfScaling=ves_measurement|mobileFlow=ves_mobileflow|other=ves_other|stateChange=ves_statechange|thresholdCrossingAlert=ves_thresholdCrossingAlert|voiceQuality=ves_voicequality|sipSignaling=ves_sipsignaling",
   "collector.service.port": "8080",
-  "collector.schema.file": "{\"v1\":\"./etc/CommonEventFormat_27.2.json\",\"v2\":\"./etc/CommonEventFormat_27.2.json\",\"v3\":\"./etc/CommonEventFormat_27.2.json\",\"v4\":\"./etc/CommonEventFormat_27.2.json\",\"v5\":\"./etc/CommonEventFormat_28.4.1.json\"}\",
+  "collector.schema.file": "{\"v1\":\"./etc/CommonEventFormat_27.2.json\",\"v2\":\"./etc/CommonEventFormat_27.2.json\",\"v3\":\"./etc/CommonEventFormat_27.2.json\",\"v4\":\"./etc/CommonEventFormat_27.2.json\",\"v5\":\"./etc/CommonEventFormat_28.4.1.json\"}",
   "collector.keystore.passwordfile": "/opt/app/VESCollector/etc/passwordfile",
   "collector.inputQueue.maxPending": "8096",
   "streams_publishes": {
     "ves_measurement": {
       "type": "message_router",
       "dmaap_info": {
-        "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.SEC_MEASUREMENT_OUTPUT/"
+        "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.VES_MEASUREMENT_OUTPUT/"
       }
     }, 
     "ves_fault": {
@@ -375,7 +386,7 @@
 # TCA pref
 REGKV='{
   "tca_policy": "{\"domain\":\"measurementsForVfScaling\",\"metricsPerEventName\":[{\"eventName\":\"vFirewallBroadcastPackets\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"LESS_OR_EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ONSET\"},{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":700,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"vLoadBalancer\",\"controlLoopSchemaType\":\"VM\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"Measurement_vGMUX\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ABATED\"},{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"GREATER\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]}]}",
-  "subscriberTopicName": "unauthenticated.SEC_MEASUREMENT_OUTPUT",
+  "subscriberTopicName": "unauthenticated.VES_MEASUREMENT_OUTPUT",
   "subscriberTimeoutMS": "-1",
   "subscriberProtocol": "http",
   "subscriberPollingInterval": "30000",
@@ -408,3 +419,69 @@
 curl -v -X PUT -H "Content-Type: application/json" \
 --data "${REGKV}" \
 "http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-analytics-tca:preferences"
+
+
+
+# SNMP Trap Collector
+REGKV='{
+"snmptrap.version": "1.3.0",
+"snmptrap.title": "ONAP SNMP Trap Receiver" ,
+"protocols.transport": "udp",
+"protocols.ipv4_interface": "0.0.0.0",
+"protocols.ipv4_port": 6162,
+"protocols.ipv6_interface": "::1",
+"protocols.ipv6_port": 6162,
+"cache.dns_cache_ttl_seconds": 60,
+"publisher.http_timeout_milliseconds": 1500,
+"publisher.http_retries": 3,
+"publisher.http_milliseconds_between_retries": 750,
+"publisher.http_primary_publisher": "true",
+"publisher.http_peer_publisher": "unavailable",
+"publisher.max_traps_between_publishes": 10,
+"publisher.max_milliseconds_between_publishes": 10000,
+    "streams_publishes": {
+            "sec_fault_unsecure": {
+                "type": "message_router",
+                "aaf_password": null,
+                "dmaap_info": {
+                    "location": "mtl5",
+                    "client_id": null,
+                    "client_role": null,
+                    "topic_url": "http://{{ mr_ip_addr }}:3904/events/ONAP-COLLECTOR-SNMPTRAP"
+                },
+                "aaf_username": null
+            }
+    },
+"files.runtime_base_dir": "/opt/app/snmptrap",
+"files.log_dir": "logs",
+"files.data_dir": "data",
+"files.pid_dir": "tmp",
+"files.arriving_traps_log": "snmptrapd_arriving_traps.log",
+"files.snmptrapd_diag": "snmptrapd_prog_diag.log",
+"files.traps_stats_log": "snmptrapd_stats.csv",
+"files.perm_status_file": "snmptrapd_status.log",
+"files.eelf_base_dir": "/opt/app/snmptrap/logs",
+"files.eelf_error": "error.log",
+"files.eelf_debug": "debug.log",
+"files.eelf_audit": "audit.log",
+"files.eelf_metrics": "metrics.log",
+"files.roll_frequency": "hour",
+"files.minimum_severity_to_log": 1,
+"trap_def.1.trap_oid" : ".1.3.6.1.4.1.74.2.46.12.1.1",
+"trap_def.1.trap_category": "ONAP-COLLECTOR-SNMPTRAP",
+"trap_def.2.trap_oid" : "*",
+"trap_def.2.trap_category": "ONAP-COLLECTOR-SNMPTRAP",
+"stormwatch.1.stormwatch_oid" : ".1.3.6.1.4.1.74.2.46.12.1.1",
+"stormwatch.1.low_water_rearm_per_minute" : "5",
+"stormwatch.1.high_water_arm_per_minute" : "100",
+"stormwatch.2.stormwatch_oid" : ".1.3.6.1.4.1.74.2.46.12.1.2",
+"stormwatch.2.low_water_rearm_per_minute" : "2",
+"stormwatch.2.high_water_arm_per_minute" : "200",
+"stormwatch.3.stormwatch_oid" : ".1.3.6.1.4.1.74.2.46.12.1.2",
+"stormwatch.3.low_water_rearm_per_minute" : "2",
+"stormwatch.3.high_water_arm_per_minute" : "200"
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SRVCNAME_STATIC_SNMPTRAP}"
+
diff --git a/heat/setup.sh b/heat/setup.sh
index a51f5ae..0014644 100755
--- a/heat/setup.sh
+++ b/heat/setup.sh
@@ -44,7 +44,7 @@
 cfy profiles use 127.0.0.1 -u admin -p admin -t default_tenant
 cfy status
 cd /tmp/bin
-./build-plugins.sh https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R2 https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases
+./build-plugins.sh https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R3 https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases
 for wagon in ./wagons/*.wgn; do cfy plugins upload \$wagon ; done
 deactivate
 EOL
diff --git a/k8s-bootstrap-container/bootstrap.sh b/k8s-bootstrap-container/bootstrap.sh
index e043460..6d39404 100755
--- a/k8s-bootstrap-container/bootstrap.sh
+++ b/k8s-bootstrap-container/bootstrap.sh
@@ -19,6 +19,8 @@
 # Expects:
 #   CM address (IP or DNS) in CMADDR environment variable
 #   CM password in CMPASS environment variable (assumes user is "admin")
+#   ONAP common Kubernetes namespace in ONAP_NAMESPACE environment variable
+#   If DCAE components are deployed in a separate Kubernetes namespace, that namespace in the DCAE_NAMESPACE environment variable
 #   Consul address with port in CONSUL variable
 #   Plugin wagon files in /wagons
 # 	Blueprints for components to be installed in /blueprints
@@ -30,11 +32,21 @@
 # Consul service registration data
 CBS_REG='{"ID": "dcae-cbs0", "Name": "config_binding_service", "Address": "config-binding-service", "Port": 10000}'
 CBS_REG1='{"ID": "dcae-cbs1", "Name": "config-binding-service", "Address": "config-binding-service", "Port": 10000}'
-CM_REG='{"ID": "dcae-cm0", "Name": "cloudify_manager", "Address": "cloudify-manager.onap", "Port": 80}'
 INV_REG='{"ID": "dcae-inv0", "Name": "inventory", "Address": "inventory", "Port": 8080}'
-# Policy handler will be looked up from a plugin on CM, which is running in the "onap" namespace,
-# so the Address field has to add the .dcae qualifier.
-PH_REG='{"ID": "dcae-ph0", "Name": "policy_handler", "Address": "policy-handler.dcae", "Port": 25577}'
+HE_REG='{"ID": "dcae-he0", "Name": "holmes-engine-mgmt", "Address": "holmes-engine-mgmt", "Port": 9102}'
+HR_REG='{"ID": "dcae-hr0", "Name": "holmes-rule-mgmt", "Address": "holmes-rule-mgmt", "Port": 9101}'
+
+# Cloudify Manager will always be in the ONAP namespace.
+CM_REG='{"ID": "dcae-cm0", "Name": "cloudify_manager", "Port": 80, "Address": "dcae-cloudify-manager.'${ONAP_NAMESPACE}'"}'
+# Policy handler will be looked up from a plugin on CM.  If DCAE components are running in a different k8s
+# namespace than CM (which always runs in the common ONAP namespace), then the policy handler address must
+# be qualified with the DCAE namespace.
+PH_REG='{"ID": "dcae-ph0", "Name": "policy_handler", "Port": 25577, "Address": "policy-handler'
+if [ ! -z "${DCAE_NAMESPACE}" ]
+then
+	PH_REG="${PH_REG}.${DCAE_NAMESPACE}"
+fi
+PH_REG="${PH_REG}\"}"
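+# e.g. with DCAE_NAMESPACE=dcae this expands to: {"ID": "dcae-ph0", "Name": "policy_handler", "Port": 25577, "Address": "policy-handler.dcae"}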
 
 # Deploy components
 # $1 -- name (for bp and deployment)
@@ -66,6 +78,8 @@
 curl -v -X PUT -H "Content-Type: application/json" --data "${CM_REG}" ${CONSUL}/v1/agent/service/register
 curl -v -X PUT -H "Content-Type: application/json" --data "${INV_REG}" ${CONSUL}/v1/agent/service/register
 curl -v -X PUT -H "Content-Type: application/json" --data "${PH_REG}" ${CONSUL}/v1/agent/service/register
+curl -v -X PUT -H "Content-Type: application/json" --data "${HE_REG}" ${CONSUL}/v1/agent/service/register
+curl -v -X PUT -H "Content-Type: application/json" --data "${HR_REG}" ${CONSUL}/v1/agent/service/register
 
 # Store the CM password into a Cloudify secret
 cfy secret create -s ${CMPASS} cmpass
@@ -81,6 +95,8 @@
 done
 set -e
 
+set +e
+# (don't let the failure of one deployment stop the script; failures here are most likely due to image pulls taking too long)
 # Deploy platform components
 deploy config_binding_service k8s-config_binding_service.yaml k8s-config_binding_service-inputs.yaml
 deploy inventory k8s-inventory.yaml k8s-inventory-inputs.yaml
@@ -89,10 +105,9 @@
 deploy pgaas_initdb k8s-pgaas-initdb.yaml k8s-pgaas-initdb-inputs.yaml
 
 # Deploy service components
-# (don't let failure of one stop the script)
-set +e
 deploy tca k8s-tca.yaml k8s-tca-inputs.yaml
 deploy ves k8s-ves.yaml k8s-ves-inputs.yaml
+deploy prh k8s-prh.yaml k8s-prh-inputs.yaml
 # holmes_rules must be deployed before holmes_engine
 deploy holmes_rules k8s-holmes-rules.yaml k8s-holmes_rules-inputs.yaml
 deploy holmes_engine k8s-holmes-engine.yaml k8s-holmes_engine-inputs.yaml
diff --git a/k8s-bootstrap-container/load-blueprints.sh b/k8s-bootstrap-container/load-blueprints.sh
index 2037427..9410f61 100755
--- a/k8s-bootstrap-container/load-blueprints.sh
+++ b/k8s-bootstrap-container/load-blueprints.sh
@@ -15,7 +15,8 @@
 k8s-policy_handler.yaml \
 k8s-pgaas-initdb.yaml \
 k8s-tca.yaml \
-k8s-ves.yaml
+k8s-ves.yaml \
+k8s-prh.yaml
 "
 
 BPDEST=blueprints
diff --git a/k8s-bootstrap-container/pom.xml b/k8s-bootstrap-container/pom.xml
index 85b99bc..a755f46 100644
--- a/k8s-bootstrap-container/pom.xml
+++ b/k8s-bootstrap-container/pom.xml
@@ -27,7 +27,7 @@
   <groupId>org.onap.dcaegen2.deployments</groupId>
   <artifactId>k8s-bootstrap-container</artifactId>
   <name>dcaegen2-deployments-k8s-bootstrap-container</name>
-  <version>1.1.8</version>
+  <version>1.2.0</version>
   <url>http://maven.apache.org</url>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
diff --git a/k8s-bootstrap-container/test-expand.sh b/k8s-bootstrap-container/test-expand.sh
index 1e929f5..62af674 100755
--- a/k8s-bootstrap-container/test-expand.sh
+++ b/k8s-bootstrap-container/test-expand.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 sed \
-  -e 's#{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases }}#https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R2#' \
+  -e 's#{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases }}#https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R3#' \
   -e 's#{{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}#https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins#' \
-  -e 's#{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}#https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.blueprints/R2#' \
-Dockerfile-template > Dockerfile
\ No newline at end of file
+  -e 's#{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}#https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.blueprints/R3#' \
+Dockerfile-template > Dockerfile
diff --git a/mvn-phase-script.sh b/mvn-phase-script.sh
index acada60..19a561a 100755
--- a/mvn-phase-script.sh
+++ b/mvn-phase-script.sh
@@ -28,7 +28,7 @@
 
 
 echo "MVN_RELEASE_TAG is set to [$MVN_RELEASE_TAG]"
-RELEASE_TAG=${MVN_RELEASE_TAG:-R2}
+RELEASE_TAG=${MVN_RELEASE_TAG:-R3}
 if [ "$RELEASE_TAG" != "R1" ]; then
   RELEASE_TAGGED_DIR="${RELEASE_TAG}/"
 else
diff --git a/tca-cdap-container/Dockerfile b/tca-cdap-container/Dockerfile
index 158c95a..2c57ff2 100644
--- a/tca-cdap-container/Dockerfile
+++ b/tca-cdap-container/Dockerfile
@@ -16,14 +16,15 @@
 
 FROM caskdata/cdap-standalone:4.1.2
 
-RUN apt-get update
-RUN apt-get install -y netcat jq wget vim iputils-ping
+RUN apt-get update && apt-get install -y netcat jq iputils-ping wget vim curl
 COPY get-tca.sh /opt/tca/get-tca.sh
 RUN /opt/tca/get-tca.sh
 COPY tca_app_config.json /opt/tca/tca_app_config.json
 COPY tca_app_preferences.json /opt/tca/tca_app_preferences.json
 COPY restart.sh /opt/tca/restart.sh
 RUN chmod 755 /opt/tca/restart.sh
+COPY mr-watchdog.sh /opt/tca/mr-watchdog.sh
+RUN chmod 755 /opt/tca/mr-watchdog.sh
 
 #COPY host.aliases /etc/host.aliases
 #RUN echo "export HOSTALIASES=/etc/host.aliases" >> /etc/profile
diff --git a/tca-cdap-container/README.txt b/tca-cdap-container/README.txt
deleted file mode 100644
index 62a7a09..0000000
--- a/tca-cdap-container/README.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Note:
-
-Although typically Java jar artifacts have SNAPSHOT version as a.b.c-SNAPSHOT, internally CDAP
-identifies the application as a.b.c.SNAPSHOT.  Thus, in app_config JSON we must refer to the 
-application as a.b.c.SNAPSHOT.  Otherwise we will have artifact not found error"
diff --git a/tca-cdap-container/get-tca.sh b/tca-cdap-container/get-tca.sh
index 66038b1..784d914 100755
--- a/tca-cdap-container/get-tca.sh
+++ b/tca-cdap-container/get-tca.sh
@@ -1,17 +1,47 @@
 #!/bin/bash
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
 
+  
 ARTIFACTPATH=${1:-/opt/tca/}
 PROTO='https'
 NEXUSREPO='nexus.onap.org'
-REPO='snapshots'
 GROUPID='org.onap.dcaegen2.analytics.tca'
 ARTIFACTID='dcae-analytics-cdap-tca'
-VERSION='2.2.0-SNAPSHOT'
 
-URL="${PROTO}://${NEXUSREPO}/service/local/repositories/${REPO}/content/${GROUPID//.//}/${ARTIFACTID}/${VERSION}/maven-metadata.xml"
-VT=$(wget --no-check-certificate -O- $URL | grep -m 1 \<value\> | sed -e 's/<value>\(.*\)<\/value>/\1/' | sed -e 's/ //g')
+#REPO='snapshots'
+REPO='releases'
+VERSION=''
 
+# if VERSION is not specified, find out the latest version
+if [ -z "$VERSION" ]; then
+  URL="${PROTO}://${NEXUSREPO}/service/local/repositories/${REPO}/content/${GROUPID//.//}/${ARTIFACTID}/maven-metadata.xml"
+  VERSION=$(wget --no-check-certificate -O- $URL | grep -m 1 \<latest\> | sed -e 's/<latest>\(.*\)<\/latest>/\1/' | sed -e 's/ //g')
+fi
+
+echo "Getting version $VERSION of $GROUPID.$ARTIFACTID from $REPO repo on $NEXUSREPO"
+
+if [ "$REPO" == "snapshots" ]; then
+  # SNAPSHOT repo contains many snapshots for each version; get the newest among them
+  URL="${PROTO}://${NEXUSREPO}/service/local/repositories/${REPO}/content/${GROUPID//.//}/${ARTIFACTID}/${VERSION}/maven-metadata.xml"
+  VT=$(wget --no-check-certificate -O- "$URL" | grep -m 1 \<value\> | sed -e 's/<value>\(.*\)<\/value>/\1/' | sed -e 's/ //g')
+else
+  VT=${VERSION}
+fi
 URL="${PROTO}://${NEXUSREPO}/service/local/repositories/${REPO}/content/${GROUPID//.//}/${ARTIFACTID}/${VERSION}/${ARTIFACTID}-${VT}.jar"
-#wget --no-check-certificate "${URL}" -O "${ARTIFACTPATH}${ARTIFACTID}-${VERSION%-SNAPSHOT}.jar"
-wget --no-check-certificate "${URL}" -O "${ARTIFACTPATH}${ARTIFACTID}.${VERSION}.jar"
+echo "Fetching $URL"
 
+wget --no-check-certificate "${URL}" -O "${ARTIFACTPATH}${ARTIFACTID}.${VERSION}.jar"
diff --git a/tca-cdap-container/mr-watchdog.sh b/tca-cdap-container/mr-watchdog.sh
new file mode 100755
index 0000000..fa623a1
--- /dev/null
+++ b/tca-cdap-container/mr-watchdog.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+
+
+SUB_TOPIC=${3:-unauthenticated.VES_MEASUREMENT_OUTPUT}
+MR_LOCATION=${1:-10.0.11.1}
+MR_PORT=${2:-3904}
+MR_PROTO='http'
+
+
+TOPIC_LIST_URL="${MR_PROTO}://${MR_LOCATION}:${MR_PORT}/topics"
+TEST_PUB_URL="${MR_PROTO}://${MR_LOCATION}:${MR_PORT}/events/${SUB_TOPIC}"
+
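+# Block until the subscriber topic exists on the Message Router: poll the topic list and,
+# if the topic is missing, publish an empty test message so that MR auto-creates it.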
+unset RES
+echo "==> Check topic [${SUB_TOPIC}] availability on ${MR_LOCATION}:${MR_PORT}"
+until [ -n "$RES" ]; do
+    URL="$TOPIC_LIST_URL"
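+    # curl --write-out appends "HTTPSTATUS:<code>" to the output so the body and status code can be split from a single response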
+    HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" "$URL")
+    HTTP_BODY=$(echo "$HTTP_RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g')
+    HTTP_STATUS=$(echo "$HTTP_RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
+    if [ "${HTTP_STATUS}" != "200" ]; then
+        echo "   ==> MR topic listing not ready, retrying in 30 seconds"
+        sleep 30
+        continue
+    fi
+
+    echo "   ==> MR topic listing received, checking topic availability"
+    RES=$(echo "${HTTP_BODY}" |jq .topics |grep "\"$SUB_TOPIC\"")
+    if [ -z "${RES}" ]; then
+            echo "      ==> No topic [${SUB_TOPIC}] found, sending a test publish"
+        URL="$TEST_PUB_URL"
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -H "Content-Type:text/plain" -X POST -d "{}" "$URL")
+        HTTP_BODY=$(echo "$HTTP_RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g')
+        HTTP_STATUS=$(echo "$HTTP_RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
+         
+        if [ "$HTTP_STATUS" != "200" ]; then
+            echo "      ==> Testing MR topic publishing received status $HTTP_STATUS != 200, retesting in 30 seconds"
+            sleep 30
+        else
+            echo "      ==> Testing MR topic publishing received status $HTTP_STATUS, topic [$SUB_TOPIC] created"
+        fi
+    fi
+done
+echo "==> Topic [${SUB_TOPIC}] ready"
diff --git a/tca-cdap-container/pom.xml b/tca-cdap-container/pom.xml
index cab867b..212feab 100644
--- a/tca-cdap-container/pom.xml
+++ b/tca-cdap-container/pom.xml
@@ -27,7 +27,7 @@
   <groupId>org.onap.dcaegen2.deployments</groupId>
   <artifactId>tca-cdap-container</artifactId>
   <name>dcaegen2-deployments-tca-cdap-container</name>
-  <version>1.0.0</version>
+  <version>1.1.0</version>
   <url>http://maven.apache.org</url>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
diff --git a/tca-cdap-container/restart.sh b/tca-cdap-container/restart.sh
index e962ee5..6d0c60f 100755
--- a/tca-cdap-container/restart.sh
+++ b/tca-cdap-container/restart.sh
@@ -23,9 +23,9 @@
 TCA_APPNAME='dcae-tca'
 
 TCA_ARTIFACT='dcae-analytics-cdap-tca'
-TCA_ARTIFACT_VERSION='2.2.0-SNAPSHOT'
 TCA_FILE_PATH='/opt/tca'
-TCA_JAR="${TCA_FILE_PATH}/${TCA_ARTIFACT}.${TCA_ARTIFACT_VERSION}.jar"
+TCA_JAR="$(ls -1r ${TCA_FILE_PATH}/${TCA_ARTIFACT}*.jar | head -1)"
+TCA_ARTIFACT_VERSION=$(echo "$TCA_JAR" |rev |cut -f 2-4 -d '.' |rev)
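+# e.g. /opt/tca/dcae-analytics-cdap-tca.2.2.1.jar yields TCA_ARTIFACT_VERSION=2.2.1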
 TCA_APP_CONF="${TCA_FILE_PATH}/tca_app_config.json"
 TCA_CONF="${TCA_FILE_PATH}/tca_config.json"
 TCA_PREF="${TCA_FILE_PATH}/tca_app_preferences.json"
@@ -36,41 +36,33 @@
 TCA_PATH_APP="${CDAP_HOST}:${CDAP_PORT}/v3/namespaces/${TCA_NAMESPACE}/apps/${TCA_APPNAME}"
 TCA_PATH_ARTIFACT="${CDAP_HOST}:${CDAP_PORT}/v3/namespaces/${TCA_NAMESPACE}/artifacts"
 
+MR_WATCHDOG_PATH="${TCA_FILE_PATH}/mr-watchdog.sh"
 
-CONSUL_HOST=${CONSU_HOST:-consul}
-CONSUL_PORT=${CONSU_PORT:-8500}
+
+WORKER_COUNT='0'
+
+CONSUL_HOST=${CONSUL_HOST:-consul}
+CONSUL_PORT=${CONSUL_PORT:-8500}
 CONFIG_BINDING_SERVICE=${CONFIG_BINDING_SERVICE:-config_binding_service}
 
 CBS_SERVICE_NAME=${CONFIG_BINDING_SERVICE}
 
-unset CBS_HOST
-unset CBS_PORT
-until [ ! -z "$CBS_HOST" ]; do
-  echo "Retrieving host and port for ${CBS_SERVICE_NAME} from ${CONSUL_HOST}:${CONSUL_PORT}" 
-  sleep 2
-  CBS_HOST=$(curl -s "${CONSUL_HOST}:${CONSUL_PORT}/v1/catalog/service/${CBS_SERVICE_NAME}" |jq .[0].ServiceAddress |sed -e 's/\"//g')
-  CBS_PORT=$(curl -s "${CONSUL_HOST}:${CONSUL_PORT}/v1/catalog/service/${CBS_SERVICE_NAME}" |jq .[0].ServicePort |sed -e 's/\"//g')
-done
-echo "Retrieved host and port for ${CBS_SERVICE_NAME} as ${CBS_HOST}:${CBS_PORT}" 
-CBS_HOST=${CBS_HOST:-config-binding-service}
-CBS_PORT=${CBS_PORT:-10000}
-
 #Changing to HOSTNAME parameter for consistency with k8s deploy
 MY_NAME=${HOSTNAME:-tca}
 
-echo "TCA environment: I am ${MY_NAME}, consul at ${CONSUL_HOST}:${CONSUL_PORT}, CBS at ${CBS_HOST}:${CBS_PORT}, service name ${CBS_SERVICE_NAME}"
-
 
 echo "Generating preference file"
+DMAAPSUBGROUP=${DMAAPSUBGROUP:-OpenDCAEc12}
+DMAAPSUBID=${DMAAPSUBID:=c12}
 sed -i 's/{{DMAAPHOST}}/'"${DMAAPHOST}"'/g' ${TCA_PREF}
 sed -i 's/{{DMAAPPORT}}/'"${DMAAPPORT}"'/g' ${TCA_PREF}
 sed -i 's/{{DMAAPPUBTOPIC}}/'"${DMAAPPUBTOPIC}"'/g' ${TCA_PREF}
 sed -i 's/{{DMAAPSUBTOPIC}}/'"${DMAAPSUBTOPIC}"'/g' ${TCA_PREF}
-sed -i 's/{{DMAAPSUBGROUP}}/OpenDCAEc12/g' ${TCA_PREF}
-sed -i 's/{{DMAAPSUBID}}/c12/g' ${TCA_PREF}
+sed -i 's/{{DMAAPSUBGROUP}}/'"${DMAAPSUBGROUP}"'/g' ${TCA_PREF}
+sed -i 's/{{DMAAPSUBID}}/'"${DMAAPSUBID}"'/g' ${TCA_PREF}
 sed -i 's/{{AAIHOST}}/'"${AAIHOST}"'/g' ${TCA_PREF}
 sed -i 's/{{AAIPORT}}/'"${AAIPORT}"'/g' ${TCA_PREF}
-if [ -z $REDISHOSTPORT ]; then
+if [ -z "$REDISHOSTPORT" ]; then
   sed -i 's/{{REDISHOSTPORT}}/NONE/g' ${TCA_PREF}
   sed -i 's/{{REDISCACHING}}/false/g' ${TCA_PREF}
 else
@@ -135,70 +127,112 @@
 
 
 function tca_status {
+    WORKER_COUNT='0'
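+    # Count how many of the three TCA programs (MR publisher, MR subscriber, VES collector flow) report RUNNING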
     echo
-    echo "TCADMaaPMRPublisherWorker status: "
-    curl -s "http://${TCA_PATH_APP}/workers/TCADMaaPMRPublisherWorker/status"
+    STATUS=$(curl -s "http://${TCA_PATH_APP}/workers/TCADMaaPMRPublisherWorker/status")
+    echo "TCADMaaPMRPublisherWorker status: $STATUS"
+    INC=$(echo "$STATUS" | jq . |grep RUNNING |wc -l)
+    WORKER_COUNT=$((WORKER_COUNT+INC))
+
+    STATUS=$(curl -s "http://${TCA_PATH_APP}/workers/TCADMaaPMRSubscriberWorker/status")
+    echo "TCADMaaPMRSubscriberWorker status: $STATUS"
+    INC=$(echo "$STATUS" | jq . |grep RUNNING |wc -l)
+    WORKER_COUNT=$((WORKER_COUNT+INC))
+
+    STATUS=$(curl -s "http://${TCA_PATH_APP}/flows/TCAVESCollectorFlow/status")
+    echo "TCAVESCollectorFlow status: $STATUS"
+    INC=$(echo "$STATUS" | jq . |grep RUNNING |wc -l)
+    WORKER_COUNT=$((WORKER_COUNT+INC))
     echo
-    echo "TCADMaaPMRSubscriberWorker status: "
-    curl -s "http://${TCA_PATH_APP}/workers/TCADMaaPMRSubscriberWorker/status"
-    echo
-    echo "TCAVESCollectorFlow status"
-    curl -s "http://${TCA_PATH_APP}/flows/TCAVESCollectorFlow/status"
-    echo; echo
 }
 
 
-function tca_poll_policy {
-    MY_NAME=${HOSTNAME:-tca}
+function tca_restart {
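+    # Full reload: wait for the DMaaP subscriber topic, then stop and delete the running app,
+    # re-load the artifact and configuration, restart, and report status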
+    MR_HOST=$(jq .subscriberHostName ${TCA_PREF} |sed -e 's/\"//g')
+    MR_PORT=$(jq .subscriberHostPort ${TCA_PREF} |sed -e 's/\"//g')
+    MR_TOPIC=$(jq .subscriberTopicName ${TCA_PREF}  |sed -e 's/\"//g')
+    echo "Verifying DMaaP topic: ${MR_TOPIC}@${MR_HOST}:${MR_PORT} (will block until topic ready)"
+    "${MR_WATCHDOG_PATH}" "${MR_HOST}" "${MR_PORT}" "${MR_TOPIC}"
+    tca_stop
+    tca_delete
+    tca_load_artifact
+    tca_load_conf
+    tca_start
+    sleep 5
+    tca_status
+}
 
+function tca_poll_policy {
     URL0="${CBS_HOST}:${CBS_PORT}/service_component_all/${MY_NAME}"
-    echo "tca_poll_policy: Retrieving configuration file at ${URL0}"
+    echo "tca_poll_policy: Retrieving all-in-one config at ${URL0}"
     HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" "$URL0")
-    HTTP_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g')
-    HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
+    HTTP_BODY=$(echo "$HTTP_RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g')
+    HTTP_STATUS=$(echo "$HTTP_RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
 
     if [ "$HTTP_STATUS" != "200" ]; then
+        echo "tca_poll_policy: Retrieving all-in-one config failed with status $HTTP_STATUS"
         URL1="${CBS_HOST}:${CBS_PORT}/service_component/${MY_NAME}"
-        echo "tca_poll_policy: Retrieving configuration file at ${URL1}"
+        echo "tca_poll_policy: Retrieving app config only at ${URL1}"
         HTTP_RESPONSE1=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" "$URL1")
-        HTTP_BODY1=$(echo $HTTP_RESPONSE1 | sed -e 's/HTTPSTATUS\:.*//g')
-        HTTP_STATUS1=$(echo $HTTP_RESPONSE1 | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
+        HTTP_BODY1=$(echo "$HTTP_RESPONSE1" | sed -e 's/HTTPSTATUS\:.*//g')
+        HTTP_STATUS1=$(echo "$HTTP_RESPONSE1" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
         if [ "$HTTP_STATUS1" != "200" ]; then
-            echo "receiving $HTTP_RESPONSE1 from CBS"
+            echo "tca_poll_policy: Retrieving app config only failed with status $HTTP_STATUS1"
             return
         fi
 
         URL2="$URL1:preferences"
-        echo "tca_poll_policy: Retrieving preferences file at ${URL1}"
+        echo "tca_poll_policy: Retrieving app preferences only at ${URL2}"
         HTTP_RESPONSE2=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" "$URL2")
-        HTTP_BODY2=$(echo $HTTP_RESPONSE2 | sed -e 's/HTTPSTATUS\:.*//g')
-        HTTP_STATUS2=$(echo $HTTP_RESPONSE2 | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
+        HTTP_BODY2=$(echo "$HTTP_RESPONSE2" | sed -e 's/HTTPSTATUS\:.*//g')
+        HTTP_STATUS2=$(echo "$HTTP_RESPONSE2" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
         if [ "$HTTP_STATUS2" != "200" ]; then
-            echo "receiving $HTTP_RESPONSE2 from CBS"
+            echo "tca_poll_policy: Retrieving app preferences only failed with status $HTTP_STATUS2"
+            return
+        fi
+  
+        # Guard against empty responses; CONFIG/PREF are not set in this branch, so check the raw bodies
+        if [[ -z "$HTTP_BODY1" || "$HTTP_BODY1" == "null" || -z "$HTTP_BODY2" || "$HTTP_BODY2" == "null" ]]; then
+            echo "tca_poll_policy: either app config or app preferences is empty, config not applicable"
+            return
+        fi
 
-        echo $HTTP_BODY1 | jq . --sort-keys > "${TCA_CONF_TEMP}"
-        echo $HTTP_BODY2 | jq . --sort-keys > "${TCA_PREF_TEMP}"
+        echo "$HTTP_BODY1" | jq . --sort-keys > "${TCA_CONF_TEMP}"
+        echo "$HTTP_BODY2" | jq . --sort-keys > "${TCA_PREF_TEMP}"
     else
-        CONFIG=$(echo $HTTP_BODY | jq .config.app_config)
-        PREF=$(echo $HTTP_BODY | jq .config.app_preferences)
-        POLICY=$(echo $HTTP_BODY | jq .policies.items[0].config.content.tca_policy)
+        CONFIG=$(echo "$HTTP_BODY" | jq .config.app_config)
+        PREF=$(echo "$HTTP_BODY" | jq .config.app_preferences)
+        POLICY=$(echo "$HTTP_BODY" | jq .policies.items[0].config.content.tca_policy)
 
-	## Check if policy content under tca_policy is returned null
-	## null indicates no active policy flow; hence use configuration loaded 
-	## from blueprint
 
-        if [ $POLICY==null ]; then
-		# tca_policy through blueprint
-		NEWPREF=${PREF}
+        if [[ "$CONFIG" == "null"  || "$PREF" == "null" ]]; then
+            echo "tca_poll_policy: received CONFIG parsed as empty, trying to parse using the R1 format" 
+            CONFIG=$(echo "$HTTP_BODY" | jq .config)
+            NEWPREF=$(echo "$HTTP_BODY" | jq .preferences)
+
+            #echo "CONFIG is [$CONFIG]"
+            #echo "NEWPREF is [$NEWPREF]"
         else
-		# tca_policy through active policy flow through PH
-        	NEWPREF=$(echo $PREF | jq --arg tca_policy "$POLICY" '. + {$tca_policy}')
+            echo "tca_poll_policy: CONFIG is [${CONFIG}], PREF is [${PREF}], POLICY is [${POLICY}]"
+	    ## Check if policy content under tca_policy is returned null
+	    ## null indicates no active policy flow; hence use configuration loaded 
+	    ## from blueprint
+            if [ "$POLICY" == "null" ]; then
+                # tca_policy through blueprint
+                NEWPREF=${PREF}
+            else
+                # tca_policy through active policy flow through PH
+                NEWPREF=$(echo "$PREF" | jq --arg tca_policy "$POLICY" '. + {$tca_policy}')
+            fi
+            NEWPREF=$(echo "$NEWPREF" | sed 's/\\n//g') 
         fi
-        NEWPREF=$(echo $NEWPREF | sed 's/\\n//g')
-        echo $CONFIG | jq . --sort-keys > "${TCA_CONF_TEMP}"
-        echo $NEWPREF | jq . --sort-keys > "${TCA_PREF_TEMP}"
+       
+        if [[ "$CONFIG" == "null"  || "$NEWPREF" == "null" ]]; then
+             echo "tca_poll_policy: either app config or app preferences is empty, config not applicable"
+             return
+        fi
+
+        echo "$CONFIG" | jq . --sort-keys > "${TCA_CONF_TEMP}"
+        echo "$NEWPREF" | jq . --sort-keys > "${TCA_PREF_TEMP}"
     fi
 
     if [ ! -e "${TCA_CONF_TEMP}" ] || [ "$(ls -sh ${TCA_CONF_TEMP} |cut -f1 -d' ' |sed -e 's/[^0-9]//g')"  -lt "1" ]; then
@@ -229,7 +263,7 @@
     CONSUMERID=$(jq .subscriberConsumerId ${TCA_PREF_TEMP} |sed -e 's/\"//g')
     if ! (echo "$CONSUMERID" |grep "$HOSTID"); then
         CONSUMERID="${CONSUMERID}-${HOSTID}"
-        jq --arg CID ${CONSUMERID} '.subscriberConsumerId = $CID' < "${TCA_PREF_TEMP}" > "${TCA_PREF_TEMP}2"
+        jq --arg CID "${CONSUMERID}" '.subscriberConsumerId = $CID' < "${TCA_PREF_TEMP}" > "${TCA_PREF_TEMP}2"
         mv "${TCA_PREF_TEMP}2" "${TCA_PREF_TEMP}"
     fi 
     if ! diff ${TCA_PREF} ${TCA_PREF_TEMP} ; then
@@ -238,61 +272,79 @@
         PERF_CHANGED=1
     fi
 
-    if [[ "$PERF_CHANGED" == "1" || "$CONF_CHANGED" == "1" ]]; then 
-	tca_stop
-	tca_delete
-        tca_load_artifact
-	tca_load_conf
-	tca_start
-	tca_status
+    if [[ "$PERF_CHANGED" == "1" || "$CONF_CHANGED" == "1" ]]; then
+        echo "Newly received configuration/preferences differ from the running instance's; reloading config"
+        tca_restart
+    else
+        echo "Newly received configuration/preferences are identical to the running instance's"
     fi 
 }
 
 
 export PATH=${PATH}:/opt/cdap/sdk/bin
 
+
+echo "Starting TCA-CDAP in standalone mode"
+
 # starting CDAP SDK in background
 cdap sdk start 
 
-
-
-echo "Waiting CDAP ready on port 11015 ..."
+echo "CDAP started, waiting for CDAP to be ready on ${CDAP_HOST}:${CDAP_PORT} ..."
 while ! nc -z ${CDAP_HOST} ${CDAP_PORT}; do   
-  sleep 0.1 # wait for 1/10 of the second before check again
+  sleep 1 # wait for 1 second before checking again
 done
-echo "CDAP has started"
-
 
 echo "Creating namespace cdap_tca_hi_lo ..."
 curl -s -X PUT "http://${CDAP_HOST}:${CDAP_PORT}/v3/namespaces/cdap_tca_hi_lo"
 
-
 # stop programs
 tca_stop
-
-
 # delete application
 tca_delete
-
-
 # load artifact
 tca_load_artifact
 tca_load_conf
-
-
 # start programs
 tca_start
 
-
 # get status of programs
 tca_status
 
+echo "TCA-CDAP standalone mode initialization completed, with $WORKER_COUNT / 3 up"
 
 
-while echo -n
+
+#Changing to HOSTNAME parameter for consistency with k8s deploy
+MY_NAME=${HOSTNAME:-tca}
+
+unset CBS_HOST
+unset CBS_PORT
+echo "TCA environment: I am ${MY_NAME}, consul at ${CONSUL_HOST}:${CONSUL_PORT}, CBS service name ${CBS_SERVICE_NAME}"
+
+while echo
 do
-    echo "======================================================"
-    date
-    tca_poll_policy
+    echo "======================================================> $(date)"
+    tca_status
+
+    while [ "$WORKER_COUNT" != "3" ]; do
+        echo "Status check: worker count is $WORKER_COUNT (expected 3), resetting TCA"
+        sleep 5
+
+        tca_restart
+        echo "TCA restarted"
+    done
+
+
+    if [[ -z "$CBS_HOST" ||  -z "$CBS_PORT" ]]; then
+       echo "Retrieving host and port for ${CBS_SERVICE_NAME} from ${CONSUL_HOST}:${CONSUL_PORT}"
+       sleep 2
+       CBS_HOST=$(curl -s "${CONSUL_HOST}:${CONSUL_PORT}/v1/catalog/service/${CBS_SERVICE_NAME}" |jq .[0].ServiceAddress |sed -e 's/\"//g')
+       CBS_PORT=$(curl -s "${CONSUL_HOST}:${CONSUL_PORT}/v1/catalog/service/${CBS_SERVICE_NAME}" |jq .[0].ServicePort |sed -e 's/\"//g')
+       echo "CBS discovered to be at ${CBS_HOST}:${CBS_PORT}"
+    fi
+
+    if [ ! -z "$CBS_HOST" ] && [ ! -z "$CBS_PORT" ]; then
+       tca_poll_policy
+    fi
     sleep 30
 done
diff --git a/tca-cdap-container/tca_app_config.json b/tca-cdap-container/tca_app_config.json
index d6adcb7..24234dc 100644
--- a/tca-cdap-container/tca_app_config.json
+++ b/tca-cdap-container/tca_app_config.json
@@ -2,7 +2,7 @@
   "artifact": {
     "name": "dcae-analytics-cdap-tca",
     "scope": "user",
-    "version": "2.2.0.SNAPSHOT"
+    "version": "2.2.1"
   },
   "config": {
     "appDescription": "DCAE Analytics Threshold Crossing Alert Application",