# LICENSE_START=======================================================
# org.onap.dmaap
# ================================================================================
# Copyright © 2020 Nordix Foundation. All rights reserved.
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
#
###############################################################################
###############################################################################
##
## Cambria API Server config
##
## Default values are shown as commented settings.
##
###############################################################################
##
## HTTP service
##
## 3904 is standard as of 7/29/14.
#
## Zookeeper Connection
##
## Both Cambria and Kafka make use of Zookeeper.
##
#config.zk.servers=172.18.1.1
#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
config.zk.servers=zookeeper:2181
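## For illustration only (hypothetical host names): a multi-node Zookeeper
## ensemble can be supplied as a standard comma-separated connect string, e.g.
#config.zk.servers=zk-0:2181,zk-1:2181,zk-2:2181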

#config.zk.root=/fe3c/cambria/config


###############################################################################
##
## Kafka Connection
##
## Items below are passed through to Kafka's producer and consumer
## configurations (after removing "kafka.")
## request.required.acks can be changed via the kafka.request.required.acks
## entry below.
#kafka.metadata.broker.list=localhost:9092,localhost:9093
#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
kafka.metadata.broker.list=kafka:9092
##kafka.request.required.acks=-1
#kafka.client.zookeeper=${config.zk.servers}
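## For illustration only: because of the pass-through described above, a
## setting such as kafka.request.required.acks=-1 reaches the Kafka client as
## request.required.acks=-1 once the "kafka." prefix is removed.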
consumer.timeout.ms=100
zookeeper.connection.timeout.ms=6000
zookeeper.session.timeout.ms=20000
zookeeper.sync.time.ms=2000
auto.commit.interval.ms=1000
fetch.message.max.bytes=1000000
auto.commit.enable=false

# (backoff * retries > zk session timeout)
kafka.rebalance.backoff.ms=10000
kafka.rebalance.max.retries=6
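## With the values above: 10000 ms backoff * 6 retries = 60000 ms, which
## exceeds the 20000 ms zookeeper.session.timeout.ms configured earlier,
## satisfying the constraint noted above.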


###############################################################################
##
## Secured Config
##
## Some data stored in the config system is sensitive -- API keys and secrets,
## for example. To protect it, we use an encryption layer for this section
## of the config.
##
## The key is a base64-encoded AES key. It must be created/configured for
## each installation.
#cambria.secureConfig.key=
##
## The initialization vector is a 16-byte value specific to the secured store.
## This must be created/configured for each installation.
#cambria.secureConfig.iv=
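## A minimal sketch of generating a fresh key/IV pair (assumes OpenSSL is
## available; any tool that emits 16 random base64-encoded bytes works):
##   openssl rand -base64 16    # run once for cambria.secureConfig.key
##   openssl rand -base64 16    # run again for cambria.secureConfig.iv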

## Southfield Sandbox
cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
authentication.adminSecret=fe3cCompound
#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==


###############################################################################
##
## Consumer Caching
##
## Kafka expects live connections from the consumer to the broker, which
## obviously doesn't work over connectionless HTTP requests. The Cambria
## server proxies HTTP requests into Kafka consumer sessions that are kept
## around for later re-use. Without this caching, every request would pay the
## consumer setup cost, which would substantially hurt a high-volume
## consumer's performance.
##
## This complicates Cambria server failover, because we often need server
## A to close its connection before server B brings up the replacement.
##

## The consumer cache is normally enabled.
#cambria.consumer.cache.enabled=true

## Cached consumers are cleaned up after a period of disuse. The server inspects
## consumers every sweepFreqSeconds and will clean up any connections that are
## dormant for touchFreqMs.
#cambria.consumer.cache.sweepFreqSeconds=15
cambria.consumer.cache.touchFreqMs=120000
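## With the values above, the sweep runs every 15 seconds (the default) and
## removes any cached consumer that has been idle for more than 120000 ms
## (2 minutes).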
##stickforallconsumerrequests=false
## The cache is managed through ZK. The default value for the ZK connection
## string is the same as config.zk.servers.
#cambria.consumer.cache.zkConnect=${config.zk.servers}

##
## Shared cache information is associated with this node's name. The default
## name is the hostname plus the HTTP service port this host runs on. (The
## hostname is determined via InetAddress.getLocalHost().getCanonicalHostName(),
## which is not always adequate.) You can set this value explicitly here.
##
#cambria.api.node.identifier=<use-something-unique-to-this-instance>
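## Illustration only (hypothetical value): a unique identifier could combine
## the host name and the HTTP service port, e.g.
#cambria.api.node.identifier=mr-node-1.example.org-3904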

#cambria.rateLimit.maxEmptyPollsPerMinute=30
#cambria.rateLimitActual.delay.ms=10

###############################################################################
##
## Metrics Reporting
##
## This server can report its metrics periodically on a topic.
##
#metrics.send.cambria.enabled=true
#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
#metrics.send.cambria.sendEverySeconds=60

cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
consumer.timeout=17
default.partitions=3
default.replicas=3
##############################################################################
#100mb
maxcontentlength=10000


##############################################################################
#AAF Properties
msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
enforced.topic.name.AAF=org.onap.dmaap.mr
forceAAF=false
transidUEBtopicreqd=false
defaultNSforUEB=org.onap.dmaap.mr
##############################################################################
#Mirror Maker Agent

msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
msgRtr.mirrormaker.timeout=15000
msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
msgRtr.mirrormaker.consumergroup=mmagentserver
msgRtr.mirrormaker.consumerid=1

kafka.max.poll.interval.ms=300000
kafka.heartbeat.interval.ms=60000
kafka.session.timeout.ms=240000
kafka.max.poll.records=1000