Nelson,Thomas(tn1381)(arthurdent3) | 4807fdf | 2018-09-19 16:52:36 -0400 | [diff] [blame] | 1 | ## As weighted quorums are not supported, it is imperative that an odd number of replicas |
| 2 | ## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7. |
| 3 | ## |
| 4 | ## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set |
| 5 | replicaCount: 3 # Desired quantity of ZooKeeper pods. This should always be (1,3,5, or 7) |
| 6 | |
podDisruptionBudget:
  maxUnavailable: 1 # Limits how many ZooKeeper pods may be unavailable due to voluntary disruptions.

terminationGracePeriodSeconds: 1800 # Duration in seconds a ZooKeeper pod needs to terminate gracefully.

## OnDelete requires you to manually delete each pod when making updates.
## This approach is at the moment safer than RollingUpdate because replication
## may be incomplete when the replication source pod is killed.
##
## ref: http://blog.kubernetes.io/2017/09/kubernetes-statefulsets-daemonsets.html
updateStrategy:
  type: OnDelete # Pods will only be created when you manually delete old pods.
| 19 | |
## refs:
## - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
## - https://github.com/kubernetes/contrib/blob/master/statefulsets/zookeeper/Makefile#L1
image:
  ## Alternative registry coordinates, kept commented out for reference:
  #repository: nexus3.onap.org:10001/library/zookeeper
  #tag: 3.3
  repository: gcr.io/google_samples/k8szk # Container image repository for zookeeper container.
  tag: v3 # Container image tag for zookeeper container.
  pullPolicy: IfNotPresent # Image pull criteria for zookeeper container.
| 29 | |
service:
  name: zookeeper
  type: ClusterIP # Exposes zookeeper on a cluster-internal IP.
  annotations: {} # Arbitrary non-identifying metadata for zookeeper service.
  ## AWS example for use with LoadBalancer service type.
  # external-dns.alpha.kubernetes.io/hostname: zookeeper.cluster.local
  # service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
  # service.beta.kubernetes.io/aws-load-balancer-internal: "true"
  ports:
    client:
      port: 2181 # Service port number for client port.
      targetPort: client # Service target port for client port (refers to the named container port).
      protocol: TCP # Service port protocol for client port.
| 43 | |
| 44 | |
## Ports exposed by the zookeeper container itself.
ports:
  client:
    containerPort: 2181 # Port number for zookeeper container client port.
    protocol: TCP # Protocol for zookeeper container client port.
  election:
    containerPort: 3888 # Port number for zookeeper container election port.
    protocol: TCP # Protocol for zookeeper container election port.
  server:
    containerPort: 2888 # Port number for zookeeper container server port.
    protocol: TCP # Protocol for zookeeper container server port.
| 55 | |
# Resource limit flavor - by default the "small" flavor is used.
flavor: small
# Segregation for different environments (small and large).
resources:
  small:
    limits:
      cpu: 500m
      # NOTE(review): env.ZK_HEAP_SIZE below is 2G, which exceeds this 900Mi
      # limit — confirm the small flavor is viable or lower the heap size.
      memory: 900Mi
    requests:
      cpu: 10m
      memory: 730Mi
  large:
    limits:
      cpu: 3
      memory: 2Gi
    requests:
      cpu: 2
      memory: 1Gi
  unlimited: {} # No requests/limits applied.
| 75 | |
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
nodeSelector: {} # Node label-values required to run zookeeper pods.

tolerations: [] # Node taint overrides for zookeeper pods.
| 79 | |
## Criteria by which pod label-values influence scheduling for zookeeper pods.
## A previous duplicate `affinity: {}` key was removed: duplicate mapping keys
## are invalid YAML, and most parsers silently kept only this last block anyway,
## so the effective value (the anti-affinity rule below) is unchanged.
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      # Forbid co-locating two zookeeper pods of this release on one node.
      - topologyKey: "kubernetes.io/hostname"
        labelSelector:
          matchLabels:
            release: zookeeper
| 88 | |
podAnnotations: {} # Arbitrary non-identifying metadata for zookeeper pods.

podLabels: {} # Key/value pairs that are attached to zookeeper pods.

## Both probes shell out to zkOk.sh.
## NOTE(review): zkOk.sh is presumably provided by the container image — verify.
livenessProbe:
  exec:
    command:
      - zkOk.sh
  initialDelaySeconds: 20

readinessProbe:
  exec:
    command:
      - zkOk.sh
  initialDelaySeconds: 20

securityContext:
  fsGroup: 1000
  #runAsUser: 1000
| 108 | |
persistence:
  enabled: true
  ## zookeeper data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  volumeReclaimPolicy: Retain
  accessMode: ReadWriteOnce
  mountPath: /dockerdata-nfs
  mountSubPath: music/zookeeper
  storageType: local
  storageClass: ""
  size: 4Gi # Requested volume size.
| 125 | |
## Exporters query apps for metrics and make those metrics available for
## Prometheus to scrape.
exporters:

  ## JMX exporter sidecar: translates ZooKeeper JMX MBeans into Prometheus metrics.
  jmx:
    enabled: false
    image:
      repository: sscaling/jmx-prometheus-exporter
      tag: 0.3.0
      pullPolicy: IfNotPresent
    config:
      lowercaseOutputName: false
      ## MBean-name patterns mapped to Prometheus metric names/labels.
      rules:
        - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
          name: "zookeeper_$2"
        - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
          name: "zookeeper_$3"
          labels:
            replicaId: "$2"
        - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
          name: "zookeeper_$4"
          labels:
            replicaId: "$2"
            memberType: "$3"
        - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
          name: "zookeeper_$4_$5"
          labels:
            replicaId: "$2"
            memberType: "$3"
      startDelaySeconds: 30
    env: {}
    resources: {}
    path: /metrics
    ports:
      jmxxp:
        containerPort: 9404
        protocol: TCP
    livenessProbe:
      httpGet:
        path: /metrics
        port: jmxxp
      initialDelaySeconds: 30
      periodSeconds: 15
      timeoutSeconds: 60
      failureThreshold: 8
      successThreshold: 1
    readinessProbe:
      httpGet:
        path: /metrics
        port: jmxxp
      initialDelaySeconds: 30
      periodSeconds: 15
      timeoutSeconds: 60
      failureThreshold: 8
      successThreshold: 1

  ## Native zookeeper exporter sidecar.
  zookeeper:
    enabled: false
    image:
      repository: josdotso/zookeeper-exporter
      tag: v1.1.2
      pullPolicy: IfNotPresent
    config:
      logLevel: info
      resetOnScrape: "true"
    env: {}
    resources: {}
    path: /metrics
    ports:
      zookeeperxp:
        containerPort: 9141
        protocol: TCP
    livenessProbe:
      httpGet:
        path: /metrics
        port: zookeeperxp
      initialDelaySeconds: 30
      periodSeconds: 15
      timeoutSeconds: 60
      failureThreshold: 8
      successThreshold: 1
    readinessProbe:
      httpGet:
        path: /metrics
        port: zookeeperxp
      initialDelaySeconds: 30
      periodSeconds: 15
      timeoutSeconds: 60
      failureThreshold: 8
      successThreshold: 1
| 216 | |
env:

  ## Options related to JMX exporter.
  JMXAUTH: "false"
  JMXDISABLE: "false"
  JMXPORT: 1099
  JMXSSL: "false"

  ## The port on which the server will accept client requests.
  ZK_CLIENT_PORT: 2181

  ## The port on which the ensemble performs leader election.
  ZK_ELECTION_PORT: 3888

  ## The JVM heap size.
  ## NOTE(review): 2G exceeds the small flavor's 900Mi memory limit above —
  ## confirm which flavor is in use before relying on this default.
  ZK_HEAP_SIZE: 2G

  ## The number of Ticks that an ensemble member is allowed to take to
  ## perform leader election.
  ZK_INIT_LIMIT: 5

  ## The log level for the ZooKeeper process's logger.
  ## Choices are `TRACE,DEBUG,INFO,WARN,ERROR,FATAL`.
  ZK_LOG_LEVEL: INFO

  ## The maximum number of concurrent client connections that
  ## a server in the ensemble will accept.
  ZK_MAX_CLIENT_CNXNS: 60

  ## The maximum session timeout that the ensemble will allow a client to request.
  ## Upstream default is `20 * ZK_TICK_TIME`.
  ZK_MAX_SESSION_TIMEOUT: 40000

  ## The minimum session timeout that the ensemble will allow a client to request.
  ## Upstream default is `2 * ZK_TICK_TIME`.
  ZK_MIN_SESSION_TIMEOUT: 4000

  ## The delay, in hours, between ZooKeeper log and snapshot cleanups.
  ZK_PURGE_INTERVAL: 0

  ## The port on which the leader will send events to followers.
  ZK_SERVER_PORT: 2888

  ## The number of snapshots that the ZooKeeper process will retain if
  ## `ZK_PURGE_INTERVAL` is set to a value greater than `0`.
  ZK_SNAP_RETAIN_COUNT: 3

  ## The number of Ticks by which a follower may lag behind the ensemble's leader.
  ZK_SYNC_LIMIT: 10

  ## The number of wall clock ms that corresponds to a Tick for the ensemble's
  ## internal time.
  ZK_TICK_TIME: 2000
| 270 | |
jobs:
  ## Optional job that creates the chroot paths listed under config.create.
  chroots:
    enabled: false
    activeDeadlineSeconds: 300
    backoffLimit: 5
    completions: 1
    config:
      ## Chroot paths to create, e.g. uncomment the examples below.
      create: []
      # - /kafka
      # - /ureplicator
    env: []
    parallelism: 1
    restartPolicy: Never