Move consul config from shared NFS to configmap
Issue-ID: OOM-597
Change-Id: I708c3e9df16003a54462f76c6ffe513b270faae8
Signed-off-by: jasmineWen <jasmine.wen@amdocs.com>
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-data-router-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-data-router-health.json
new file mode 100644
index 0000000..be41934
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-data-router-health.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "A&AI Synapse Data Routing Service",
+ "checks": [
+ {
+ "id": "data-router-process",
+ "name": "Synapse Presence",
+ "script": "/consul/scripts/data-router-script.sh",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json
new file mode 100644
index 0000000..6b42e0c
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json
@@ -0,0 +1,21 @@
+{
+ "service": {
+ "name": "A&AI HBase Health Check",
+ "checks": [
+ {
+ "id": "hbase-aai",
+ "name": "HBase Health Check",
+ "http": "http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster",
+ "method": "GET",
+ "header": {
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-model-loader-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-model-loader-health.json
new file mode 100644
index 0000000..044a844
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-model-loader-health.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "A&AI Model Loader",
+ "checks": [
+ {
+ "id": "model-loader-process",
+ "name": "Model Loader Presence",
+ "script": "/consul/scripts/model-loader-script.sh",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json
new file mode 100644
index 0000000..2a111d6
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json
@@ -0,0 +1,33 @@
+{
+ "service": {
+ "name": "A&AI Search Data Service",
+ "checks": [
+ {
+ "id": "elasticsearch",
+ "name": "Search Data Service Document Store",
+ "http": "http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/_cat/indices?v",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "elasticsearch-write-health",
+ "name": "Search Data Service Document Store Write Test",
+ "script": "/consul/scripts/aai-search-storage-write-script.sh",
+ "interval": "60s"
+ },
+ {
+ "id": "search-data-service-availability",
+ "name": "Search Data Service Availability",
+ "script": "curl -k --cert /consul/certs/client-cert-onap.crt.pem --cert-type PEM --key /consul/certs/client-cert-onap.key.pem --key-type PEM https://search-data-service.{{ .Values.nsPrefix }}:9509/services/search-data-service/v1/jaxrsExample/jaxrs-services/echo/up 2>&1 | grep 'Up'",
+ "interval": "15s"
+ },
+ {
+ "id": "search-data-service-api",
+ "name": "Search Data Service Operational Test",
+ "script": "/consul/scripts/search-data-service-availability.sh",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json
new file mode 100644
index 0000000..0274cd5
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json
@@ -0,0 +1,46 @@
+{
+ "service": {
+ "name": "Active and Available Inventory",
+ "checks": [
+ {
+ "id": "aai-service",
+ "name": "Core A&AI",
+ "http": "https://aai-service.{{ .Values.nsPrefix }}:8443/aai/util/echo",
+ "header": {
+ "Authorization": ["Basic QUFJOkFBSQ=="],
+ "X-TransactionId": ["ConsulHealthCheck"],
+ "X-FromAppId": ["healthcheck"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "aai-resources",
+ "name": "Resources Microservice",
+ "http": "https://aai-resources.{{ .Values.nsPrefix }}:8447/aai/util/echo",
+ "header": {
+ "Authorization": ["Basic QUFJOkFBSQ=="],
+ "X-TransactionId": ["ConsulHealthCheck"],
+ "X-FromAppId": ["healthcheck"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "aai-traversal",
+ "name": "Traversal Microservice",
+ "http": "https://aai-traversal.{{ .Values.nsPrefix }}:8446/aai/util/echo",
+ "header": {
+ "Authorization": ["Basic QUFJOkFBSQ=="],
+ "X-TransactionId": ["ConsulHealthCheck"],
+ "X-FromAppId": ["healthcheck"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-sparky-be-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-sparky-be-health.json
new file mode 100644
index 0000000..bf6305c
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-sparky-be-health.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "A&AI UI Backend Service",
+ "checks": [
+ {
+ "id": "sparky-be-process",
+ "name": "UI Backend Presence",
+ "script": "/consul/scripts/sparky-be-script.sh",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-tabular-backend-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-tabular-backend-health.json
new file mode 100644
index 0000000..c7fc19b
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-tabular-backend-health.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "A&AI Tabular Data Store",
+ "checks": [
+ {
+ "id": "tabular-backend",
+ "name": "Tabular Data Store Operational Test",
+ "script": "/consul/scripts/tabular-db-availability.sh",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json b/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json
new file mode 100644
index 0000000..9505246
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json
@@ -0,0 +1,22 @@
+{
+ "service": {
+ "name": "Health Check: APPC - Dgbuilder",
+ "checks": [
+ {
+ "id": "appc-dgbuilder",
+ "name": "APPC-Dgbuilder Server Health Check",
+ "http": "http://appc-dgbuilder.{{ .Values.nsPrefix }}:3000/",
+ "method": "HEAD",
+ "header": {
+ "Authorization": ["Basic ZGd1c2VyOnRlc3QxMjM="],
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-health.json b/kubernetes/consul/resources/config/consul-agent-config/appc-health.json
new file mode 100644
index 0000000..86f2ce8
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-health.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "Health Check: APPC",
+ "checks": [
+ {
+ "id": "appc-dbhost-healthcheck",
+ "name": "APPC DBHost Health Check",
+ "script": "/consul/scripts/appc-dbhost-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json
new file mode 100644
index 0000000..f36251a
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "Health Check: APPC-SDN-CTL-DB-01",
+ "checks": [
+ {
+ "id": "appc-sdnctldb01.{{ .Values.nsPrefix }}",
+ "name": "APPC SDNCTLDB01 Health Check",
+ "tcp": "appc-sdnctldb01.{{ .Values.nsPrefix }}:3306",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json
new file mode 100644
index 0000000..8c8171f
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "Health Check: APPC-SDN-CTL-DB-02",
+ "checks": [
+ {
+ "id": "appc-sdnctldb02.{{ .Values.nsPrefix }}",
+ "name": "APPC SDNCTLDB02 Health Check",
+ "tcp": "appc-sdnctldb02.{{ .Values.nsPrefix }}:3306",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json
new file mode 100644
index 0000000..ec6db9d
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json
@@ -0,0 +1,22 @@
+{
+ "service": {
+ "name": "Health Check: APPC - SDN Host",
+ "checks": [
+ {
+ "id": "appc-sdnhost",
+ "name": "APPC SDN Host Health Check",
+ "http": "http://appc-sdnhost.{{ .Values.nsPrefix }}:8282/apidoc/explorer/index.html",
+ "method": "HEAD",
+ "header": {
+ "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.crt.pem b/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.crt.pem
new file mode 100644
index 0000000..5696aa3
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.crt.pem
@@ -0,0 +1,25 @@
+Bag Attributes
+ friendlyName: tomcat
+ localKeyID: 54 69 6D 65 20 31 34 39 33 33 32 33 39 32 32 37 35 31
+subject=/C=CA/ST=Ontario/L=Ottawa/O=ONAP/OU=ONAP/CN=ONAP
+issuer=/C=CA/ST=Ontario/L=Ottawa/O=ONAP/OU=ONAP/CN=ONAP
+-----BEGIN CERTIFICATE-----
+MIIDWTCCAkGgAwIBAgIERWHcIzANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJD
+QTEQMA4GA1UECBMHT250YXJpbzEPMA0GA1UEBxMGT3R0YXdhMQ0wCwYDVQQKEwRP
+TkFQMQ0wCwYDVQQLEwRPTkFQMQ0wCwYDVQQDEwRPTkFQMB4XDTE3MDQyNzIwMDUz
+N1oXDTM3MDExMjIwMDUzN1owXTELMAkGA1UEBhMCQ0ExEDAOBgNVBAgTB09udGFy
+aW8xDzANBgNVBAcTBk90dGF3YTENMAsGA1UEChMET05BUDENMAsGA1UECxMET05B
+UDENMAsGA1UEAxMET05BUDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AJsQpjB5U0exZHWKVt6xDzmBBhLiAtv7Qb8zsbAcIZPxuKsieOJykWDCaf+Ip7oe
++b86nf4LmKrNm4KMsDNnlU7Bg7+3HFa7m+tZgfILORv2HPMRXgvcqPFr1dxgTBkp
+xtlcGXHhA8oBpmqTmOCitE+ngVH+FBVxN93aHEDz+Dgc06PyzoP/xWI0GjvlOsv/
+qZeXCj6K4Hpu/FSPNk06Piq9M+rDwUMuyaRtY9FWjYMvkMCrRvlZUoAasrC0BGyR
+UAboHdk5aW3AZ0cVR6NMSlELcvCUFqzacAOWLgffX3b5vhkOaAsmnnzmxANV6s0t
+SqrD6Mmjg5OcYJW4VFKrwjUCAwEAAaMhMB8wHQYDVR0OBBYEFNji+IU70Qgptn4i
+boq/rOKNAg8tMA0GCSqGSIb3DQEBCwUAA4IBAQBc5mJLeeUUzJ4MujZjn0DS3Lvv
+THJTE54Id1euT3ddzfX3htF0Ewd90YzmLuj1y8r8PXj7b/8Bq+cvoKbmJ42c8h3X
+If0tqde+gYWx1X3NAWHwz00Cje9R0KY4Bx1Cvr39jTw/ESnuSQDKPHBnn8WyAS9K
+08ZhvrVSK54d3U7tDVut9UVva8Scdi12utTAWaOIlusLo3bU9Z6t+tgg7AnQBYc0
+N9oCMbq/MACFlLSdc1J6NITYS8XHY2RS8u88eLbWkCcEEx1glYz/PMX3+V1Ow9Uy
+MjenEx8ifl96ZSOe9XsI2gl2TCaevCY/QuREu4LZB9XmO0gncH7gF5w9Bw2b
+-----END CERTIFICATE-----
diff --git a/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.key.pem b/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.key.pem
new file mode 100644
index 0000000..c7e386e
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.key.pem
@@ -0,0 +1,32 @@
+Bag Attributes
+ friendlyName: tomcat
+ localKeyID: 54 69 6D 65 20 31 34 39 33 33 32 33 39 32 32 37 35 31
+Key Attributes: <No Attributes>
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCbEKYweVNHsWR1
+ilbesQ85gQYS4gLb+0G/M7GwHCGT8birInjicpFgwmn/iKe6Hvm/Op3+C5iqzZuC
+jLAzZ5VOwYO/txxWu5vrWYHyCzkb9hzzEV4L3Kjxa9XcYEwZKcbZXBlx4QPKAaZq
+k5jgorRPp4FR/hQVcTfd2hxA8/g4HNOj8s6D/8ViNBo75TrL/6mXlwo+iuB6bvxU
+jzZNOj4qvTPqw8FDLsmkbWPRVo2DL5DAq0b5WVKAGrKwtARskVAG6B3ZOWltwGdH
+FUejTEpRC3LwlBas2nADli4H3192+b4ZDmgLJp585sQDVerNLUqqw+jJo4OTnGCV
+uFRSq8I1AgMBAAECggEANFs6wcM1S0+qC8XZ7vb5nQDjfByzunLrkBN0O3JEJB/J
+qn7JMixcyb7a61zIxR8QVHEGR3DC62jgyQOXusOOtjjAs0qwVtihnKVsKr1/WuGO
+hMOobXjj0iAG5ZHeH+DrMxjVvo2rKdnExtdvFunY18xG7dhMD7Fam525THUTql4K
+yxhT7X6MrfS1eFjbR6oAIGNjoNTwyyEjEm4yvHO3PnG2NeyIeu7zIO2k+GimAAXT
+tN3AK30lmr3+35k6o+XQAhDE4/6msn6jBVSdLfK35ATFGwrojD0bCgALR4SUNEyd
+i33nuNLGyeI7DPWbqmjyWQW9uWLFJD85We2HzqBZQQKBgQDIrJ4PLvYE75dFWnSa
+lBr1HZbl/x5mP56MVEiwTabRbUsJoXKlX44lm9hwQaPbuoUAflb1ZtNKbyiRVsuN
+Ft5RToU9PWXyFtc2eyLCJToxHI4MhsuGRAaEeic5+l12wdpRxl74eeXdKJK4P/iU
+8wdhSxDG2ekkj6lyye5l5iwcBwKBgQDF0Pptcs+yPCz9FRqCmHT/I4QTK1VSD6mW
+F2Yd2KEUa4aocIb+L56ghJfYR+enIe9hHmb0ulomJaLLTicZJk6ffDfaQpCFBiS7
+BirDqHX8zlnBHePrBzZPyA5EfGMLxlP4uUk4g28JMFBJaZTEXAnQLUH0mIm0o0YR
+mbsaVo/Y4wKBgFsG8iuxAaf7hoLPJVV5GUFWyrxJnWCEO0csdEyE7MbS7NbRhU++
+qJwmtWc2Xz2svegbZxaqLe31vlEvLeYyGWaIV6gP0c6ezcDI2lt2x46/hS/pdSjS
+cqJlRqXmC79y77VoZmwP31USsnshiYEHPLHFeza4YilTgWmwb5OJdTjBAoGBAJBC
+0P7UhedjvyNqKoUnDdurWPxp07Ueuvw8YDpP61jq+a8JMUlaDQLe76XI+oWGV/6p
+n0fGR0weklRV0Gmk6B2jB1BizuZUDqFd4/4ActtE2WvekoKqJc+VA+KqG8lQf5iZ
+924BXA6Fb2e6WcXBoV5yQvFP9M0JbWYUiMCydAElAoGBAKof78r8POfTPq9fQA9I
+0zsQGnxqnSqyIu5yobM3GyXHBPOKdevlxyXxuMnGTr7upSNZrDrrA+f5Czlu7Fas
+qdt/5PmqYQjRsVoHNQFatUzHWwx2vU2Pr1jBpZFBpnjnLwn3A35+UEWn13nCjkla
+TrDniEcyId4ya5cMLDnM7Zgw
+-----END PRIVATE KEY-----
diff --git a/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json b/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json
new file mode 100644
index 0000000..d14b164
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json
@@ -0,0 +1,23 @@
+{
+ "service": {
+ "name": "Health Check: Log - Elastic Search",
+ "checks": [
+ {
+ "id": "log-elasticsearch-server",
+ "name": "Log Elastic Search Health Check",
+ "http": "http://elasticsearch.{{ .Values.nsPrefix }}:9200/_cluster/health?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-elasticsearch-tcp",
+ "name": "Log Elastic Search TCP Health Check",
+ "tcp": "elasticsearchtcp.{{ .Values.nsPrefix }}:9300",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json b/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json
new file mode 100644
index 0000000..cc72bbf
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json
@@ -0,0 +1,16 @@
+{
+ "service": {
+ "name": "Health Check: Log - Kibana",
+ "checks": [
+ {
+ "id": "log-kibana-server",
+ "name": "Log kibana Health Check",
+ "http": "http://kibana.{{ .Values.nsPrefix }}:5601/status",
+ "method": "HEAD",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json b/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json
new file mode 100644
index 0000000..9eb60fd
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json
@@ -0,0 +1,95 @@
+{
+ "service": {
+ "name": "Health Check: Log - Log Stash",
+ "checks": [
+ {
+ "id": "log-logstash-internal-server-gi",
+ "name": "Log Stash Health Check - General Information",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-internal-server-node-info",
+ "name": "Log Stash Health Check - Node Information",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-internal-server-os-info",
+ "name": "Log Stash Health Check - OS Information",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/os?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-internal-server-jvm-info",
+ "name": "Log Stash Health Check - JVM Information",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/jvm?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-internal-server-plugin-info",
+ "name": "Log Stash Health Check - Plugin Information",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/plugins?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-internal-server-node-stat",
+ "name": "Log Stash Health Check - Node Stats",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-internal-server-jvm-stat",
+ "name": "Log Stash Health Check - JVM Stats",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/jvm?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-internal-server-process-stat",
+ "name": "Log Stash Health Check - Process Stats",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/process?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-internal-server-os-stat",
+ "name": "Log Stash Health Check - OS Stats",
+ "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/os?pretty",
+ "method": "GET",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "log-logstash-tcp",
+ "name": "Log Stash File Beat TCP Health Check",
+ "tcp": "logstash.{{ .Values.nsPrefix }}:5044",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/model-loader.properties b/kubernetes/consul/resources/config/consul-agent-config/model-loader.properties
new file mode 100644
index 0000000..b2db044
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/model-loader.properties
@@ -0,0 +1,23 @@
+# Model Loader Distribution Client Configuration
+ml.distribution.ACTIVE_SERVER_TLS_AUTH=false
+ml.distribution.ASDC_ADDRESS=c2.vm1.sdc.simpledemo.openecomp.org:8443
+ml.distribution.CONSUMER_GROUP=aai-ml-group
+ml.distribution.CONSUMER_ID=aai-ml
+ml.distribution.ENVIRONMENT_NAME=AUTO
+ml.distribution.KEYSTORE_PASSWORD=
+ml.distribution.KEYSTORE_FILE=asdc-client.jks
+ml.distribution.PASSWORD=OBF:1ks51l8d1o3i1pcc1r2r1e211r391kls1pyj1z7u1njf1lx51go21hnj1y0k1mli1sop1k8o1j651vu91mxw1vun1mze1vv11j8x1k5i1sp11mjc1y161hlr1gm41m111nkj1z781pw31kku1r4p1e391r571pbm1o741l4x1ksp
+ml.distribution.POLLING_INTERVAL=30
+ml.distribution.POLLING_TIMEOUT=20
+ml.distribution.USER=aai
+ml.distribution.ARTIFACT_TYPES=MODEL_INVENTORY_PROFILE,MODEL_QUERY_SPEC,VNF_CATALOG
+
+# Model Loader AAI REST Client Configuration
+ml.aai.BASE_URL=https://c1.vm1.aai.simpledemo.openecomp.org:8443
+ml.aai.MODEL_URL=/aai/v10/service-design-and-creation/models/model/
+ml.aai.NAMED_QUERY_URL=/aai/v10/service-design-and-creation/named-queries/named-query/
+ml.aai.VNF_IMAGE_URL=/aai/v8/service-design-and-creation/vnf-images
+ml.aai.KEYSTORE_FILE=aai-os-cert.p12
+ml.aai.KEYSTORE_PASSWORD=OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
+ml.aai.AUTH_USER=ModelLoader
+ml.aai.AUTH_PASSWORD=OBF:1qvu1v2h1sov1sar1wfw1j7j1wg21saj1sov1v1x1qxw
diff --git a/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json b/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json
new file mode 100644
index 0000000..ee0d90e
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json
@@ -0,0 +1,10 @@
+{
+ "service": {
+ "name": "Health Check: Message Router - DMaaP",
+ "check": {
+ "http": "http://dmaap.{{ .Values.nsPrefix }}:3904/topics",
+ "interval": "30s",
+ "timeout": "1s"
+ }
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/mr-kafka-health.json b/kubernetes/consul/resources/config/consul-agent-config/mr-kafka-health.json
new file mode 100644
index 0000000..df3b190
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/mr-kafka-health.json
@@ -0,0 +1,10 @@
+{
+ "service": {
+ "name": "Health Check: Message Router - Kafka",
+ "check": {
+ "script": "/consul/scripts/mr-kafka-health.sh",
+ "interval": "30s",
+ "timeout": "1s"
+ }
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/mr-zookeeper-health.json b/kubernetes/consul/resources/config/consul-agent-config/mr-zookeeper-health.json
new file mode 100644
index 0000000..36d295c
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/mr-zookeeper-health.json
@@ -0,0 +1,10 @@
+{
+ "service": {
+ "name": "Health Check: Message Router - ZooKeeper",
+ "check": {
+ "script": "/consul/scripts/mr-zookeeper-health.sh",
+ "interval": "30s",
+ "timeout": "1s"
+ }
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/msb-health.json b/kubernetes/consul/resources/config/consul-agent-config/msb-health.json
new file mode 100644
index 0000000..d15c2ef
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/msb-health.json
@@ -0,0 +1,39 @@
+{
+ "service": {
+ "name": "Health Check: MSB",
+ "checks": [
+ {
+ "id": "msb-eag.{{ .Values.nsPrefix }}",
+ "name": "MSB eag Health Check",
+ "http": "http://msb-eag.{{ .Values.nsPrefix }}:80/iui/microservices/default.html",
+ "method": "HEAD",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "msb-iag.{{ .Values.nsPrefix }}",
+ "name": "MSB iag Health Check",
+ "http": "http://msb-iag.{{ .Values.nsPrefix }}:80/iui/microservices/default.html",
+ "method": "HEAD",
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "msb-consul.{{ .Values.nsPrefix }}",
+ "name": "MSB consul Health Check",
+ "tcp": "msb-consul.{{ .Values.nsPrefix }}:8500",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "msb-discovery.{{ .Values.nsPrefix }}",
+ "name": "MSB discovery Health Check",
+ "tcp": "msb-discovery.{{ .Values.nsPrefix }}:10081",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/mso-health.json b/kubernetes/consul/resources/config/consul-agent-config/mso-health.json
new file mode 100644
index 0000000..1df7714
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/mso-health.json
@@ -0,0 +1,28 @@
+{
+ "service": {
+ "name": "Health Check: MSO",
+ "checks": [
+ {
+ "id": "mso-api-healthcheck",
+ "name": "MSO API Health Check",
+ "script": "/consul/scripts/mso-api-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "mso-camunda-healthcheck",
+ "name": "MSO Camunda Health Check",
+ "script": "/consul/scripts/mso-camunda-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "mso-jra-healthcheck",
+ "name": "MSO JRA Health Check",
+ "script": "/consul/scripts/mso-jra-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json b/kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json
new file mode 100644
index 0000000..54bd2ef
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json
@@ -0,0 +1,15 @@
+{
+ "service": {
+ "name": "Health Check: MSO - MariaDb",
+ "checks": [
+ {
+ "id": "mso-mariadb",
+ "name": "MSO Mariadb Health Check",
+ "script": "/consul/scripts/mso-mariadb-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json b/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json
new file mode 100644
index 0000000..dba7c77
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json
@@ -0,0 +1,63 @@
+{
+ "service": {
+ "name": "Health Check: MULTICLOUD",
+ "checks": [
+ {
+ "id": "framework",
+ "name": "Framework Health Check",
+ "http": "http://framework.{{ .Values.nsPrefix }}:9001/api/multicloud/v0/swagger.json",
+ "method": "HEAD",
+ "header": {
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "multicloud-ocata",
+ "name": "Multicloud Ocata Health Check",
+ "http": "http://multicloud-ocata.{{ .Values.nsPrefix }}:9006/api/multicloud-ocata/v0/swagger.json",
+ "method": "HEAD",
+ "header": {
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "multicloud-vio",
+ "name": "Multicloud Vio Health Check",
+ "http": "http://multicloud-vio.{{ .Values.nsPrefix }}:9004/api/multicloud-vio/v0/swagger.json",
+ "method": "HEAD",
+ "header": {
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "multicloud-windriver",
+ "name": "Multicloud Windriver Health Check",
+ "http": "http://multicloud-windriver.{{ .Values.nsPrefix }}:9005/api/multicloud-titanium_cloud/v0/swagger.json",
+ "method": "HEAD",
+ "header": {
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt
new file mode 100644
index 0000000..a6e084c
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt
@@ -0,0 +1,9 @@
+{
+ "vnfId" : "testwrite",
+ "device" : "10.198.1.31",
+ "timestamp" : "2017-08-23T19:13:56Z",
+ "jdmTotalMem" : "2097152",
+ "jdmAvailableMem" : "1877272",
+ "jdmUserCpu" : "16",
+ "jdmSystemCpu" : "3"
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh
new file mode 100755
index 0000000..3d26f6e
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh
@@ -0,0 +1,17 @@
+if curl -s -X PUT http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite -d @/consul/scripts/aai-search-storage-write-doc.txt | grep '\"created\":true'; then
+ if curl -s -X DELETE http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"failed\":0'; then
+ if curl -s -X GET http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"found\":false'; then
+ echo Successful PUT, DELETE, GET from Search Document Storage 2>&1
+ exit 0
+ else
+ echo Failed GET from Search Document Storage 2>&1
+ exit 1
+ fi
+ else
+ echo Failed DELETE from Search Document Storage 2>&1
+ exit 1
+ fi
+else
+ echo Failed PUT from Search Document Storage 2>&1
+ exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh
new file mode 100755
index 0000000..5f91c5e
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh
@@ -0,0 +1,13 @@
+APPC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "appc-dbhost-[^[:space:]]*")
+if [ -n "$APPC_DBHOST_POD" ]; then
+ if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $APPC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
+ echo Success. APPC DBHost is running. 2>&1
+ exit 0
+ else
+ echo Failed. APPC DBHost is not running. 2>&1
+ exit 1
+ fi
+else
+ echo Failed. APPC DBHost is offline. 2>&1
+ exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh
new file mode 100755
index 0000000..035e7c8
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh
@@ -0,0 +1,16 @@
+
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-data-router[^[:space:]]*")
+
+if [ -n "$NAME" ]; then
+ if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'data-router' > /dev/null; then
+
+ echo Success. Synapse process is running. 2>&1
+ exit 0
+ else
+ echo Failed. Synapse process is not running. 2>&1
+ exit 1
+ fi
+else
+ echo Failed. Synapse container is offline. 2>&1
+ exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh
new file mode 100755
index 0000000..9a4b4df
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh
@@ -0,0 +1,16 @@
+
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-model-loader[^[:space:]]*")
+
+if [ -n "$NAME" ]; then
+ if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'model-loader' > /dev/null; then
+
+ echo Success. Model Loader process is running. 2>&1
+ exit 0
+ else
+ echo Failed. Model Loader process is not running. 2>&1
+ exit 1
+ fi
+else
+ echo Failed. Model Loader container is offline. 2>&1
+ exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh
new file mode 100755
index 0000000..a109032
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh
@@ -0,0 +1,13 @@
+kafkapod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-global-kafka-[^[:space:]]*")
+if [ -n "$kafkapod" ]; then
+ if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $kafkapod -- ps ef | grep -i kafka; then
+ echo Success. Kafka process is running. 2>&1
+ exit 0
+ else
+ echo Failed. Kafka is not running. 2>&1
+ exit 1
+ fi
+else
+ echo Failed. Kafka container is offline. 2>&1
+ exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh
new file mode 100755
index 0000000..47c42d5
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh
@@ -0,0 +1,13 @@
+zkpod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-zookeeper-[^[:space:]]*")
+if [ -n "$zkpod" ]; then
+ if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $zkpod -- ps ef | grep -i zookeeper; then
+ echo Success. Zookeeper process is running. 2>&1
+ exit 0
+ else
+ echo Failed. Zookeeper is not running. 2>&1
+ exit 1
+ fi
+else
+ echo Failed. Zookeeper container is offline. 2>&1
+ exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh
new file mode 100755
index 0000000..8f3f85c
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh
@@ -0,0 +1,15 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/ecomp/mso/infra/healthcheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
+## Exit critical (non-zero) unless the response reported "Application ready".
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh
new file mode 100755
index 0000000..341ff19
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh
@@ -0,0 +1,15 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/mso/healthcheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
+## Exit critical (non-zero) unless the response reported "Application ready".
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh
new file mode 100755
index 0000000..beeb289
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh
@@ -0,0 +1,15 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/networks/rest/healthcheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
+## Exit critical (non-zero) unless the response reported "Application ready".
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh
new file mode 100755
index 0000000..aa73a73
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh
@@ -0,0 +1,14 @@
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "mso-mariadb[^[:space:]]*" | head -n 1)
+
+if [ -n "$NAME" ]; then
+  if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$NAME" -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+    echo Success. mariadb process is running.
+    exit 0
+  else
+    echo Failed. mariadb process is not running.
+    exit 1
+  fi
+else
+  echo Failed. mariadb container is offline.
+  exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh
new file mode 100755
index 0000000..00a0564
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+## Strip out the ON_BOARDING section from the response XML (otherwise we will
+## get duplicate results when we search for component BE) and check to see if
+## the BE component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "BE" | grep "UP")
+## Exit critical (non-zero) unless the BE component was reported as UP.
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh
new file mode 100755
index 0000000..9950cc9
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+## Strip out the ON_BOARDING section from the response XML (otherwise we will
+## get duplicate results when we search for component CASSANDRA) and check to see if
+## the CASSANDRA component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "CASSANDRA" | grep "UP")
+## Exit critical (non-zero) unless the CASSANDRA component was reported as UP.
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh
new file mode 100755
index 0000000..27f3b22
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+## Strip out the ON_BOARDING section from the response XML (otherwise we will
+## get duplicate results when we search for component FE) and check to see if
+## the FE component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "FE" | grep "UP")
+## Exit critical (non-zero) unless the FE component was reported as UP.
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh
new file mode 100755
index 0000000..c5955f3
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+## Strip out the ON_BOARDING section from the response XML (otherwise we will
+## get duplicate results when we search for component TITAN) and check to see if
+## the TITAN component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "TITAN" | grep "UP")
+## Exit critical (non-zero) unless the TITAN component was reported as UP.
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh
new file mode 100755
index 0000000..27b9b9f
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh
@@ -0,0 +1,13 @@
+SDNC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "sdnc-dbhost-[^[:space:]]*" | head -n 1)
+if [ -n "$SDNC_DBHOST_POD" ]; then
+  if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$SDNC_DBHOST_POD" -- ./healthcheck.sh | grep -i "mysqld is alive"; then
+    echo Success. SDNC DBHost is running.
+    exit 0
+  else
+    echo Failed. SDNC DBHost is not running.
+    exit 1
+  fi
+else
+  echo Failed. SDNC DBHost is offline.
+  exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh
new file mode 100755
index 0000000..d511873
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+SEARCH_SERVICE_NAME="search-data-service.{{ .Values.nsPrefix }}"
+SEARCH_SERVICE_PORT=9509
+HEALTH_CHECK_INDEX="healthcheck"
+
+# 'Document Index' REST Endpoint
+INDEX_URL="https://$SEARCH_SERVICE_NAME:$SEARCH_SERVICE_PORT/services/search-data-service/v1/search/indexes/$HEALTH_CHECK_INDEX"
+INDEX_SCHEMA="{\"fields\":[{\"name\": \"field1\", \"data-type\": \"string\"}]}"
+
+SEARCH_CERT_FILE="/consul/certs/client-cert-onap.crt.pem"
+SEARCH_KEY_FILE="/consul/certs/client-cert-onap.key.pem"
+
+## Try to create an index via the Search Data Service API.
+CREATE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "$INDEX_SCHEMA" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X PUT $INDEX_URL)
+
+RESULT_STRING=" "
+
+if [ $CREATE_INDEX_RESP -eq 201 ]; then
+  RESULT_STRING="Service Is Able To Communicate With Back End"
+elif [ $CREATE_INDEX_RESP -eq 400 ]; then
+  # A 400 response could mean that the index already exists (ie: we didn't
+  # clean up after ourselves on a previous check), so log the response but
+  # don't exit yet.  If we fail on the delete then we can consider the
+  # check a failure, otherwise, we are good.
+  RESULT_STRING="$RESULT_STRING Create Index [FAIL - 400 (possible index already exists)] "
+else
+  RESULT_STRING="Service API Failure - $CREATE_INDEX_RESP"
+  echo $RESULT_STRING
+  exit 1
+fi
+
+## Now, clean up after ourselves.
+DELETE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "{ }" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X DELETE $INDEX_URL)
+
+if [ $DELETE_INDEX_RESP -eq 200 ]; then
+  RESULT_STRING="Service Is Able To Communicate With Back End"
+else
+  RESULT_STRING="Service API Failure - $DELETE_INDEX_RESP"
+  echo $RESULT_STRING
+  exit 1
+fi
+
+echo $RESULT_STRING
+exit 0
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh
new file mode 100755
index 0000000..7796681
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh
@@ -0,0 +1,16 @@
+
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-sparky-be[^[:space:]]*" | head -n 1)
+
+if [ -n "$NAME" ]; then
+  if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$NAME" -- ps -efww | grep 'java' | grep 'sparky' > /dev/null; then
+
+    echo Success. UI Backend Service process is running.
+    exit 0
+  else
+    echo Failed. UI Backend Service process is not running.
+    exit 1
+  fi
+else
+  echo Failed. UI Backend Service container is offline.
+  exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh
new file mode 100755
index 0000000..dc7768f
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh
@@ -0,0 +1,20 @@
+
+# Query the Hbase service for the cluster status.
+GET_CLUSTER_STATUS_RESPONSE=$(curl -si -X GET -H "Accept: text/xml" http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster)
+
+if [ -z "$GET_CLUSTER_STATUS_RESPONSE" ]; then
+  echo "Tabular store is unreachable."
+  exit 2
+fi
+
+# Check the status XML for an empty '<DeadNodes/>' element, which indicates
+# that no region servers are being reported as dead.
+DEAD_NODES=$(echo $GET_CLUSTER_STATUS_RESPONSE | grep "<DeadNodes/>")
+
+if [ -n "$DEAD_NODES" ]; then
+  echo "Tabular store is up and accessible."
+  exit 0
+else
+  echo "Tabular store is up but is reporting dead nodes - cluster may be in degraded state."
+  exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh
new file mode 100755
index 0000000..bbb080f
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh
@@ -0,0 +1,14 @@
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "vid-mariadb[^[:space:]]*" | head -n 1)
+
+if [ -n "$NAME" ]; then
+  if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$NAME" -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+    echo Success. mariadb process is running.
+    exit 0
+  else
+    echo Failed. mariadb process is not running.
+    exit 1
+  fi
+else
+  echo Failed. mariadb container is offline.
+  exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json
new file mode 100644
index 0000000..ec8ec86
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json
@@ -0,0 +1,49 @@
+{
+ "service": {
+ "name": "Health Check: SDC",
+ "checks": [
+ {
+ "id": "sdc-fe-healthcheck",
+ "name": "SDC Front End Health Check",
+ "script": "/consul/scripts/sdc-fe-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "sdc-be-healthcheck",
+ "name": "SDC Back End Health Check",
+ "script": "/consul/scripts/sdc-be-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "sdc-titan-healthcheck",
+ "name": "SDC Titan Health Check",
+ "script": "/consul/scripts/sdc-titan-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "sdc-cs-healthcheck",
+ "name": "SDC Cassandra Health Check",
+ "script": "/consul/scripts/sdc-cs-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "sdc-catalog-healthcheck",
+ "name": "SDC Catalog Health Check",
+ "http": "https://sdc-be.{{ .Values.nsPrefix }}:8443/asdc/v1/catalog/services",
+ "header": {
+ "Authorization": ["Basic dmlkOktwOGJKNFNYc3pNMFdYbGhhazNlSGxjc2UyZ0F3ODR2YW9HR21KdlV5MlU="],
+ "X-ECOMP-InstanceID": ["VID"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-dbhost.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dbhost.json
new file mode 100644
index 0000000..ea0ae56
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dbhost.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "Health Check: SDNC - DB Host",
+ "checks": [
+ {
+ "id": "sdnc-dbhost-healthcheck",
+ "name": "SDNC DBHOST Health Check",
+ "script": "/consul/scripts/sdnc-dbhost-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json
new file mode 100644
index 0000000..6ae14af
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json
@@ -0,0 +1,22 @@
+{
+ "service": {
+ "name": "Health Check: SDNC - DGBuilder",
+ "checks": [
+ {
+ "id": "sdnc-dgbuilder",
+ "name": "SDNC-DGbuilder Health Check",
+ "http": "http://sdnc-dgbuilder.{{ .Values.nsPrefix }}:3000/",
+ "method": "HEAD",
+ "header": {
+ "Authorization": ["Basic ZGd1c2VyOnRlc3QxMjM="],
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json
new file mode 100644
index 0000000..0ee5e89
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json
@@ -0,0 +1,22 @@
+{
+ "service": {
+ "name": "Health Check: SDNC",
+ "checks": [
+ {
+ "id": "odl-api-healthcheck",
+ "name": "SDNC API Health Check",
+ "http": "http://sdnhost.{{ .Values.nsPrefix }}:8282/restconf/operations/SLI-API:healthcheck",
+ "method": "POST",
+ "header": {
+ "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json
new file mode 100644
index 0000000..092df05
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json
@@ -0,0 +1,21 @@
+{
+ "service": {
+ "name": "Health Check: SDNC Portal",
+ "checks": [
+ {
+ "id": "sdnc-portal",
+ "name": "SDNC Portal Health Check",
+ "http": "http://sdnc-portal.{{ .Values.nsPrefix }}:8843/login",
+ "method": "HEAD",
+ "header": {
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json
new file mode 100644
index 0000000..ed4a29d
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "Health Check: SDNC-SDN-CTL-DB-01",
+ "checks": [
+ {
+ "id": "sdnctldb01.{{ .Values.nsPrefix }}",
+ "name": "SDNC SDNCTLDB01 Health Check",
+ "tcp": "sdnctldb01.{{ .Values.nsPrefix }}:3306",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json
new file mode 100644
index 0000000..8c4700b
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "Health Check: SDNC-SDN-CTL-DB-02",
+ "checks": [
+ {
+ "id": "sdnctldb02.{{ .Values.nsPrefix }}",
+ "name": "SDNC SDNCTLDB02 Health Check",
+ "tcp": "sdnctldb02.{{ .Values.nsPrefix }}:3306",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json
new file mode 100644
index 0000000..585b9c0
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json
@@ -0,0 +1,22 @@
+{
+ "service": {
+ "name": "Health Check: SDNC - SDN Host",
+ "checks": [
+ {
+ "id": "sdnc-sdnhost",
+ "name": "SDNC SDN Host Health Check",
+ "http": "http://sdnhost.{{ .Values.nsPrefix }}:8282/apidoc/explorer/index.html",
+ "method": "HEAD",
+ "header": {
+ "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json b/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json
new file mode 100644
index 0000000..5ace3e4
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json
@@ -0,0 +1,112 @@
+{
+ "service": {
+ "name": "Health Check: VFC",
+ "checks": [
+ {
+ "id": "vfc-catalog.{{ .Values.nsPrefix }}",
+ "name": "VFC catalog Health Check",
+ "tcp": "vfc-catalog.{{ .Values.nsPrefix }}:8806",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-emsdriver.{{ .Values.nsPrefix }}",
+ "name": "VFC emsdriver Health Check",
+ "tcp": "vfc-emsdriver.{{ .Values.nsPrefix }}:8206",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-gvnfmdriver.{{ .Values.nsPrefix }}",
+ "name": "VFC gvnfmdriver Health Check",
+ "tcp": "vfc-gvnfmdriver.{{ .Values.nsPrefix }}:8484",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-hwvnfmdriver.{{ .Values.nsPrefix }}",
+ "name": "VFC hwvnfmdriver Health Check",
+ "tcp": "vfc-hwvnfmdriver.{{ .Values.nsPrefix }}:8482",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-jujudriver.{{ .Values.nsPrefix }}",
+ "name": "VFC jujudriver Health Check",
+ "tcp": "vfc-jujudriver.{{ .Values.nsPrefix }}:8483",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-nokiavnfmdriver.{{ .Values.nsPrefix }}",
+ "name": "VFC nokiavnfmdriver Health Check",
+ "tcp": "vfc-nokiavnfmdriver.{{ .Values.nsPrefix }}:8486",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-nslcm.{{ .Values.nsPrefix }}",
+ "name": "VFC nslcm Health Check",
+ "tcp": "vfc-nslcm.{{ .Values.nsPrefix }}:8403",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-resmgr.{{ .Values.nsPrefix }}",
+ "name": "VFC resmgr Health Check",
+ "tcp": "vfc-resmgr.{{ .Values.nsPrefix }}:8480",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-vnflcm.{{ .Values.nsPrefix }}",
+ "name": "VFC vnflcm Health Check",
+ "tcp": "vfc-vnflcm.{{ .Values.nsPrefix }}:8801",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-vnfmgr.{{ .Values.nsPrefix }}",
+ "name": "VFC vnfmgr Health Check",
+ "tcp": "vfc-vnfmgr.{{ .Values.nsPrefix }}:8803",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-vnfres.{{ .Values.nsPrefix }}",
+ "name": "VFC vnfres Health Check",
+ "tcp": "vfc-vnfres.{{ .Values.nsPrefix }}:8802",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-workflow.{{ .Values.nsPrefix }}",
+ "name": "VFC workflow Health Check",
+ "tcp": "vfc-workflow.{{ .Values.nsPrefix }}:10550",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-workflowengineactiviti.{{ .Values.nsPrefix }}",
+ "name": "VFC workflow-engine Health Check",
+ "tcp": "vfc-workflowengineactiviti.{{ .Values.nsPrefix }}:8080",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-ztesdncdriver.{{ .Values.nsPrefix }}",
+ "name": "VFC ztesdncdriver Health Check",
+ "tcp": "vfc-ztesdncdriver.{{ .Values.nsPrefix }}:8411",
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vfc-ztevnfmdriver.{{ .Values.nsPrefix }}",
+ "name": "VFC ztevnfmdriver Health Check",
+ "tcp": "vfc-ztevnfmdriver.{{ .Values.nsPrefix }}:8410",
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/vid-health.json b/kubernetes/consul/resources/config/consul-agent-config/vid-health.json
new file mode 100644
index 0000000..2dc6f0a
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/vid-health.json
@@ -0,0 +1,29 @@
+{
+ "service": {
+ "name": "Health Check: VID",
+ "checks": [
+ {
+ "id": "vid-server",
+ "name": "VID Server Health Check",
+ "http": "http://vid-server.{{ .Values.nsPrefix }}:8080/vid/healthCheck",
+ "method": "GET",
+ "header": {
+ "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ },
+ {
+ "id": "vid-mariadb",
+ "name": "Vid Mariadb Health Check",
+ "script": "/consul/scripts/vid-mariadb-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}