# Copyright © 2019 Intel Corporation Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: {{ include "common.servicename" . }}
  labels:
    heritage: "{{ .Release.Service }}"
    release: "{{ .Release.Name }}"
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app: {{ template "common.name" . }}
spec:
  serviceName: {{ include "common.servicename" . }}
  replicas: {{ .Values.replicaCount }}
  template:
    metadata:
      labels:
        heritage: "{{ .Release.Service }}"
        release: "{{ .Release.Name }}"
        chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
        app: {{ include "common.name" . }}
    spec:
{{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
      containers:
      - name: {{ include "common.servicename" . }}
        image: "{{ .Values.repository }}/{{ .Values.image }}"
        imagePullPolicy: "{{ .Values.pullPolicy }}"
        ports:
        - containerPort: {{ .Values.service.peerInternalPort }}
          name: {{ .Values.service.peerPortName }}
        - containerPort: {{ .Values.service.clientInternalPort }}
          name: {{ .Values.service.clientPortName }}
        {{- if eq .Values.liveness.enabled true }}
        livenessProbe:
          exec:
            command: ["/bin/sh", "-c", "etcdctl cluster-health | grep -w healthy"]
          initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
          periodSeconds: {{ .Values.liveness.periodSeconds }}
          timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
        {{- end }}
        readinessProbe:
          exec:
            command: ["/bin/sh", "-c", "etcdctl cluster-health | grep -w healthy"]
          initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
          periodSeconds: {{ .Values.readiness.periodSeconds }}
        resources:
{{ include "common.resources" . | indent 10 }}
        env:
        - name: INITIAL_CLUSTER_SIZE
          value: {{ .Values.replicaCount | quote }}
        - name: SET_NAME
          value: {{ include "common.servicename" . }}
{{- if .Values.extraEnv }}
{{ toYaml .Values.extraEnv | indent 8 }}
{{- end }}
        lifecycle:
          preStop:
            exec:
              command:
                - "/bin/sh"
                - "-ec"
                - |
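                  # preStop hook: on scale-down (pod ordinal >= INITIAL_CLUSTER_SIZE), deregister
                  # this member from the etcd cluster and wipe its data dir so a later scale-up
                  # can re-add the pod cleanly.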
                  EPS=""
                  for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                      EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
                  done

                  HOSTNAME=$(hostname)

                  member_hash() {
                      etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
                  }

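                  # SET_ID is this pod's ordinal index, e.g. "etcd-2" -> 2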
                  SET_ID=${HOSTNAME##*[^0-9]}

                  if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
                      echo "Removing ${HOSTNAME} from etcd cluster"
                      ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
                      if [ $? -eq 0 ]; then
                          # Remove everything, otherwise the cluster will no longer be able to scale up
                          rm -rf /var/run/etcd/*
                      fi
                  fi
        command:
          - "/bin/sh"
          - "-ec"
          - |
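            # Startup flow: wait for all initial peers to resolve, re-join with existing data if
            # a data dir is already present, add ourselves as a new member when scaling past the
            # initial cluster size, otherwise bootstrap a brand-new cluster with the initial peers.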
            HOSTNAME=$(hostname)

            # store member id into PVC for later member replacement
            collect_member() {
                while ! etcdctl member list > /dev/null 2>&1; do sleep 1; done
                etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
                exit 0
            }

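            # eps returns a comma-separated list of client endpoints for all initial members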
            eps() {
                EPS=""
                for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                    EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
                done
                echo ${EPS}
            }

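            # member_hash looks up this pod's member ID in the etcd member list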
            member_hash() {
                etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
            }

            # we should wait for the other pods to be up before trying to join,
            # otherwise we get "no such host" errors when trying to resolve other members
            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                while true; do
                    echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
                    ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
                    sleep 1s
                done
            done

            # re-joining after failure?
            if [ -e /var/run/etcd/default.etcd ]; then
                echo "Re-joining etcd member"
                member_id=$(cat /var/run/etcd/member_id)

                # re-join member
                ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SET_NAME}:2380 | true
                exec etcd --name ${HOSTNAME} \
                    --listen-peer-urls http://0.0.0.0:2380 \
                    --listen-client-urls http://0.0.0.0:2379 \
                    --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
                    --data-dir /var/run/etcd/default.etcd
            fi

            # SET_ID is this pod's ordinal index, e.g. "etcd-3" -> 3
            SET_ID=${HOSTNAME##*[^0-9]}

            # adding a new member to an existing cluster (assuming all initial pods are available)
            if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
                export ETCDCTL_ENDPOINT=$(eps)

                # member already added?
                MEMBER_HASH=$(member_hash)
                if [ -n "${MEMBER_HASH}" ]; then
                    # the member hash exists but for some reason etcd failed;
                    # as the data dir has not been created, we can remove the member
                    # and retrieve a new hash
                    etcdctl member remove ${MEMBER_HASH}
                fi

                echo "Adding new member"
                etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SET_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs

                if [ $? -ne 0 ]; then
                    echo "Exiting"
                    rm -f /var/run/etcd/new_member_envs
                    exit 1
                fi

                cat /var/run/etcd/new_member_envs
                source /var/run/etcd/new_member_envs

                collect_member &

                exec etcd --name ${HOSTNAME} \
                    --listen-peer-urls http://0.0.0.0:2380 \
                    --listen-client-urls http://0.0.0.0:2379 \
                    --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
                    --data-dir /var/run/etcd/default.etcd \
                    --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
                    --initial-cluster ${ETCD_INITIAL_CLUSTER} \
                    --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
            fi

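            # initial bootstrap: pods 0..INITIAL_CLUSTER_SIZE-1 start a brand-new cluster together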
            PEERS=""
            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
            done

            collect_member &

            # join member
            exec etcd --name ${HOSTNAME} \
                --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
                --listen-peer-urls http://0.0.0.0:2380 \
                --listen-client-urls http://0.0.0.0:2379 \
                --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
                --initial-cluster-token etcd-cluster-1 \
                --initial-cluster ${PEERS} \
                --initial-cluster-state new \
                --data-dir /var/run/etcd/default.etcd
        volumeMounts:
        - name: {{ include "common.servicename" . }}-datadir
          mountPath: /var/run/etcd
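  # Back the etcd data dir with a per-pod PVC when persistence is enabled; otherwise use an emptyDir.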
  {{- if .Values.persistence.enabled }}
  volumeClaimTemplates:
  - metadata:
      name: {{ include "common.servicename" . }}-datadir
    spec:
      accessModes:
      - "{{ .Values.persistence.accessMode }}"
      resources:
        requests:
          # upstream recommended max is 700M
          storage: "{{ .Values.persistence.storage }}"
    {{- if .Values.persistence.storageClass }}
    {{- if (eq "-" .Values.persistence.storageClass) }}
      storageClassName: ""
    {{- else }}
      storageClassName: "{{ .Values.persistence.storageClass }}"
    {{- end }}
    {{- end }}
  {{- else }}
      volumes:
      - name: {{ include "common.servicename" . }}-datadir
      {{- if .Values.memoryMode }}
        emptyDir:
          medium: Memory
      {{- else }}
        emptyDir: {}
      {{- end }}
  {{- end }}