# ============LICENSE_START====================================================
# org.onap.dcae
# =============================================================================
# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
# =============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END======================================================

set -x
#
# get configuration
#
CODE_SOURCE=$1
CODE_VERSION=$2
CLUSTER_INDEX=$3
CLUSTER_SIZE=$4
CLUSTER_FQDNS=$5
CLUSTER_LOCAL_IPS=$6
CLUSTER_FLOATING_IPS=$7
DATACENTER=$8
REGISTERED_NAME=$9
export JAVA_HOME=/usr/lib/jvm/default-java
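# derive a MySQL root password from the provisioned ssh private key and keep it for the install steps below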
md5sum /root/.sshkey/id_rsa | awk '{ print $1 }' >/root/.mysqlpw
chmod 400 /root/.mysqlpw
#
# enable outside apt repositories
#
wget -qO- http://public-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.6.0.3/hdp.list >/etc/apt/sources.list.d/hdp.list
wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/cask.list >/etc/apt/sources.list.d/cask.list
wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/pubkey.gpg | apt-key add -
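# import the HDP repository signing key (B9733A7A07513CAD is the Hortonworks key)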
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 B9733A7A07513CAD
apt-get update
#
# install software from apt repositories
#
apt-get install -y default-jdk hadoop-hdfs hadoop-mapreduce hive hbase libsnappy-dev liblzo2-dev hadooplzo spark-master spark-python zip unzip
usermod -a -G hadoop hive
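# the first three nodes (indexes 0-2) form the ZooKeeper ensemble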
if [ $CLUSTER_INDEX -lt 3 ]
then
 apt-get install -y zookeeper-server
 cat <<!EOF >>/etc/zookeeper/conf/zookeeper-env.sh
export JAVA_HOME=/usr/lib/jvm/default-java
export ZOOCFGDIR=/etc/zookeeper/conf
export ZOO_LOG_DIR=/var/log/zookeeper
export ZOOPIDFILE=/var/run/zookeeper/zookeeper_server.pid
!EOF
 mkdir -p /var/lib/zookeeper
 chown zookeeper:zookeeper /var/lib/zookeeper
 cp /usr/hdp/current/zookeeper-server/etc/init.d/zookeeper-server /etc/init.d/.
 update-rc.d zookeeper-server defaults
 service zookeeper-server start
fi
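# node 2 additionally hosts the MySQL-backed Hive metastore and the CDAP services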
if [ $CLUSTER_INDEX -eq 2 ]
then
 debconf-set-selections <<!
mysql-server mysql-server/root_password password $(cat /root/.mysqlpw)
!
 debconf-set-selections <<!
mysql-server mysql-server/root_password_again password $(cat /root/.mysqlpw)
!
 apt-get install -y cdap cdap-cli cdap-gateway cdap-kafka cdap-master cdap-security cdap-ui mysql-server mysql-connector-java
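 # suppress tracing so the MySQL root password is not written to the boot log; the echo keeps a trace-style record of the command without it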
 set +x
 echo + mysql_secure_installation --use-default
 mysql_secure_installation --use-default --password=$(cat /root/.mysqlpw)
 set -x
 mysql_install_db
 cp /usr/share/java/mysql-connector-java-*.jar /usr/hdp/current/hive-client/lib/.
 mkdir -p /usr/lib/hive/logs
 chown -R hive:hadoop /usr/lib/hive
 chmod -R 755 /usr/lib/hive
fi
#
# make directories
#
mkdir -p /hadoop/hdfs/journalnode/cl /hadoop/hdfs/namenode /hadoop/hdfs/data /etc/hadoop/conf /hadoop/yarn/local /hadoop/yarn/log /usr/lib/hadoop/logs /usr/lib/hadoop-mapreduce/logs /usr/lib/hadoop-yarn/logs /usr/lib/hbase/logs /etc/cdap/conf
#
# set up config files
#
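# HDP installs under a versioned directory; capture that version so Spark and CDAP get a matching -Dhdp.version option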
HDPVER=$(ls /usr/hdp | grep -v current)
echo -Dhdp.version=$HDPVER >/usr/hdp/current/spark-client/conf/java-opts
echo "export OPTS=\"\${OPTS} -Dhdp.version=$HDPVER\"" >>/etc/cdap/conf/cdap-env.sh
cat >/etc/profile.d/hadoop.sh <<'!EOF'
HADOOP_PREFIX=/usr/hdp/current/hadoop-client
HADOOP_YARN_HOME=/usr/hdp/current/hadoop-yarn-nodemanager
HADOOP_HOME=/usr/hdp/current/hadoop-client
HADOOP_COMMON_HOME=$HADOOP_HOME
HADOOP_CONF_DIR=/etc/hadoop/conf
HADOOP_HDFS_HOME=/usr/hdp/current/hadoop-hdfs-namenode
HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec
YARN_LOG_DIR=/usr/lib/hadoop-yarn/logs
HADOOP_LOG_DIR=/usr/lib/hadoop/logs
JAVA_HOME=/usr/lib/jvm/default-java
JAVA=$JAVA_HOME/bin/java
PATH=$PATH:$HADOOP_HOME/bin
HBASE_LOG_DIR=/usr/lib/hbase/logs
HADOOP_MAPRED_LOG_DIR=/usr/lib/hadoop-mapreduce/logs
HBASE_CONF_DIR=/etc/hbase/conf
export HADOOP_PREFIX HADOOP_HOME HADOOP_COMMON_HOME HADOOP_CONF_DIR HADOOP_HDFS_HOME HADOOP_YARN_HOME JAVA_HOME PATH HADOOP_LIBEXEC_DIR JAVA YARN_LOG_DIR HADOOP_LOG_DIR HBASE_LOG_DIR HADOOP_MAPRED_LOG_DIR HBASE_CONF_DIR
!EOF
chmod 755 /etc/profile.d/hadoop.sh
cat </etc/profile.d/hadoop.sh >>/etc/hadoop/conf/hadoop-env.sh
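# give the hdfs user passwordless ssh between nodes; the HA fencing method (sshfence) configured below relies on this key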
mv /root/.sshkey /var/lib/hadoop-hdfs/.ssh
cp /var/lib/hadoop-hdfs/.ssh/id_rsa.pub /var/lib/hadoop-hdfs/.ssh/authorized_keys
>/etc/hadoop/conf/dfs.exclude
>/etc/hadoop/conf/yarn.exclude
chown -R hdfs:hadoop /var/lib/hadoop-hdfs/.ssh /hadoop /usr/lib/hadoop
chown -R yarn:hadoop /usr/lib/hadoop-yarn /hadoop/yarn
chown -R mapred:hadoop /usr/lib/hadoop-mapreduce
chown -R hbase:hbase /usr/lib/hbase
chmod 700 /var/lib/hadoop-hdfs/.ssh
chmod 600 /var/lib/hadoop-hdfs/.ssh/*
sed -i -e '/maxClientCnxns/d' /etc/zookeeper/conf/zoo.cfg

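# generate the per-node configuration with a Python helper; the heredoc is unquoted, so $CLUSTER_* and $HDPVER expand before the script is written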
cat >/tmp/init.py <<!EOF
import os
with open('/root/.mysqlpw', 'r') as f:
 mysqlpw = f.readline().strip()
myid=int('$CLUSTER_INDEX')
count=$CLUSTER_SIZE
fqdns='$CLUSTER_FQDNS'.split(',')
localips='$CLUSTER_LOCAL_IPS'.split(',')
floatingips='$CLUSTER_FLOATING_IPS'.split(',')
with open('/etc/hosts', 'a') as f:
 f.write("\n")
 for index in range(0, count):
  hn=fqdns[index][0: fqdns[index].index('.')]
  f.write("{ip} {fqdn} {hn}\n".format(ip=localips[index],hn=hn,fqdn=fqdns[index]))

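# pxc: write a Hadoop-style XML configuration file from a dict of property names and values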
def pxc(f, m):
 a = "<?xml version='1.0' encoding='UTF-8'?>\n<?xml-stylesheet type='text/xsl' href='configuration.xsl'?>\n<configuration>"
 for n in m.keys():
  a = a + "\n <property>\n  <name>{n}</name>\n  <value>{v}</value>\n </property>".format(n=n,v=m[n])
 a = a + "\n</configuration>\n"
 with open(f, 'w') as xml:
  xml.write(a)
pxc('/etc/hadoop/conf/core-site.xml', {
 'fs.defaultFS':'hdfs://cl'
 })
pxc('/etc/hadoop/conf/hdfs-site.xml', {
 'dfs.namenode.datanode.registration.ip-hostname-check':'false',
 'dfs.namenode.name.dir':'/hadoop/hdfs/namenode',
 'dfs.hosts.exclude':'/etc/hadoop/conf/dfs.exclude',
 'dfs.datanode.data.dir':'/hadoop/hdfs/data',
 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode',
 'dfs.nameservices':'cl',
 'dfs.ha.namenodes.cl':'nn1,nn2',
 'dfs.namenode.rpc-address.cl.nn1':localips[0]+':8020',
 'dfs.namenode.rpc-address.cl.nn2':localips[1]+':8020',
 'dfs.namenode.http-address.cl.nn1':localips[0]+':50070',
 'dfs.namenode.http-address.cl.nn2':localips[1]+':50070',
 'dfs.namenode.shared.edits.dir':'qjournal://'+localips[0]+':8485;'+localips[1]+':8485;'+localips[2]+':8485/cl',
 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode',
 'dfs.client.failover.proxy.provider.cl':'org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider',
 'dfs.ha.fencing.methods':'sshfence(hdfs),shell(/bin/true)',
 'dfs.ha.fencing.ssh.private-key-files':'/var/lib/hadoop-hdfs/.ssh/id_rsa',
 'dfs.ha.fencing.ssh.connect-timeout':'30000',
 'dfs.ha.automatic-failover.enabled':'true',
 'ha.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181'
 })
pxc('/etc/hadoop/conf/yarn-site.xml', {
 'yarn.nodemanager.vmem-check-enabled':'false',
 'yarn.application.classpath':'/etc/hadoop/conf,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*',
 'yarn.nodemanager.delete.debug-delay-sec':'43200',
 'yarn.scheduler.minimum-allocation-mb':'512',
 'yarn.scheduler.maximum-allocation-mb':'8192',
 'yarn.nodemanager.local-dirs':'/hadoop/yarn/local',
 'yarn.nodemanager.log-dirs':'/hadoop/yarn/log',
 'yarn.resourcemanager.zk-address':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181',
 'yarn.resourcemanager.ha.enabled':'true',
 'yarn.resourcemanager.ha.rm-ids':'rm1,rm2',
 'yarn.resourcemanager.hostname.rm1':localips[1],
 'yarn.resourcemanager.hostname.rm2':localips[2],
 'yarn.resourcemanager.cluster-id':'cl',
 'yarn.resourcemanager.recovery-enabled':'true',
 'yarn.resourcemanager.store.class':'org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore',
 'yarn.resourcemanager.nodes.exclude-path':'/etc/hadoop/conf/yarn.exclude'
 })
pxc('/etc/hadoop/conf/mapred-site.xml', {
 'mapreduce.application.classpath':'/etc/hadoop/conf,/usr/lib/hadoop/lib/*,/usr/lib/hadoop/*,/usr/hdp/current/hadoop-hdfs-namenode/,/usr/hdp/current/hadoop-hdfs-namenode/lib/*,/usr/hdp/current/hadoop-hdfs-namenode/*,/usr/hdp/current/hadoop-yarn-nodemanager/lib/*,/usr/hdp/current/hadoop-yarn-nodemanager/*,/usr/hdp/current/hadoop-mapreduce-historyserver/lib/*,/usr/hdp/current/hadoop-mapreduce-historyserver/*',
 'mapreduce.jobhistory.intermediate-done-dir':'/mr-history/tmp',
 'mapreduce.jobhistory.done-dir':'/mr-history/done',
 'mapreduce.jobhistory.address':localips[1],
 'mapreduce.jobhistory.webapp.address':localips[1]
 })
pxc('/etc/hbase/conf/hbase-site.xml', {
 'hbase.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181',
 'hbase.rootdir':'hdfs://cl/apps/hbase/data',
 'hbase.cluster.distributed':'true'
 })
pxc('/etc/hive/conf/hive-site.xml', {
 'fs.file.impl.disable.cache':'true',
 'fs.hdfs.impl.disable.cache':'true',
 'hadoop.clientside.fs.operations':'true',
 'hive.auto.convert.join.noconditionaltask.size':'1000000000',
 'hive.auto.convert.sortmerge.join.noconditionaltask':'true',
 'hive.auto.convert.sortmerge.join':'true',
 'hive.enforce.bucketing':'true',
 'hive.enforce.sorting':'true',
 'hive.mapjoin.bucket.cache.size':'10000',
 'hive.mapred.reduce.tasks.speculative.execution':'false',
 'hive.metastore.cache.pinobjtypes':'Table,Database,Type,FieldSchema,Order',
 'hive.metastore.client.socket.timeout':'60s',
 'hive.metastore.local':'true',
 'hive.metastore.uris':'thrift://' + fqdns[2] + ':9083',
 'hive.metastore.warehouse.dir':'/apps/hive/warehouse',
 'hive.optimize.bucketmapjoin.sortedmerge':'true',
 'hive.optimize.bucketmapjoin':'true',
 'hive.optimize.mapjoin.mapreduce':'true',
 'hive.optimize.reducededuplication.min.reducer':'1',
 'hive.security.authorization.manager':'org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider',
 'hive.semantic.analyzer.factory.impl':'org.apache.hive.hcatalog.cli.HCatSemanticAnalyzerFactory',
 'javax.jdo.option.ConnectionDriverName':'com.mysql.jdbc.Driver',
 'javax.jdo.option.ConnectionPassword': mysqlpw,
 'javax.jdo.option.ConnectionURL':'jdbc:mysql://localhost:3306/metastore?createDatabaseIfNotExist=true',
 'javax.jdo.option.ConnectionUserName':'root'
 })
if myid == 2:
 pxc('/etc/cdap/conf/cdap-site.xml', {
  'zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181/\${root.namespace}',
  'router.server.address':localips[2],
  'explore.enabled':'true',
  'enable.unrecoverable.reset':'true',
  'kafka.seed.brokers':localips[2] + ':9092',
  'app.program.jvm.opts':'-XX:MaxPermSize=128M \${twill.jvm.gc.opts} -Dhdp.version=$HDPVER -Dspark.yarn.am.extraJavaOptions=-Dhdp.version=$HDPVER'
  })
with open('/etc/hbase/conf/regionservers', 'w') as f:
 for ip in localips:
  f.write('{ip}\n'.format(ip=ip))
with open('/etc/hbase/conf/hbase-env.sh', 'a') as f:
 f.write("export HBASE_MANAGES_ZK=false\n")
with open('/etc/zookeeper/conf/zoo.cfg', 'a') as f:
 f.write("server.1={L1}:2888:3888\nserver.2={L2}:2888:3888\nserver.3={L3}:2888:3888\nmaxClientCnxns=0\nautopurge.purgeInterval=6\n".format(L1=localips[0],L2=localips[1],L3=localips[2]))
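# record cluster membership; the shell steps below source this file to pick up $me, the local IPs ($n0, $n1, ...) and the floating IPs ($N0, $N1, ...)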
with open('/etc/clustermembers', 'w') as f:
 f.write("export me={me}\n".format(me=myid))
 for idx in range(len(localips)):
  f.write("export n{i}={ip}\n".format(i=idx, ip=localips[idx]))
  f.write("export N{i}={ip}\n".format(i=idx, ip=floatingips[idx]))
with open('/etc/hadoop/conf/slaves', 'w') as f:
 for idx in range(len(localips)):
  if idx != myid:
   f.write("{x}\n".format(x=localips[idx]))
if myid < 3:
 with open('/var/lib/zookeeper/myid', 'w') as f:
  f.write("{id}".format(id=(myid + 1)))
 os.system('service zookeeper-server restart')
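# prime known_hosts and confirm the hdfs user can reach every node over ssh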
for ip in localips:
 os.system("su - hdfs -c \"ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 {ip} echo Connectivity to {ip} verified\"".format(ip=ip))
!EOF

python /tmp/init.py

. /etc/clustermembers
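# waitfor <host> <port>: poll with nc until the given TCP port accepts connections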
waitfor() {
 while ( ! nc $1 $2 </dev/null )
 do
  echo waiting for $1 port $2
  sleep 30
 done
}
# journal nodes are on port 8485
if [ $me -lt 3 ]
then
 su - hdfs -c '$HADOOP_HOME/sbin/hadoop-daemon.sh start journalnode'
 waitfor $n0 8485
 waitfor $n1 8485
 waitfor $n2 8485
fi
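# on first boot only, node 0 formats HDFS and the ZooKeeper failover controller state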
if [ $me -eq 0 -a "$setupdone" = "" ]
then
 su - hdfs -c 'hdfs namenode -format -nonInteractive'
 su - hdfs -c 'hdfs zkfc -formatZK'
fi
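# node 1 bootstraps itself as the standby namenode and initializes the resourcemanager state store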
if [ $me -eq 1 -a "$setupdone" = "" ]
then
 waitfor $n0 8020
 su - hdfs -c 'hdfs namenode -bootstrapStandby -nonInteractive'
 su - yarn -c 'yarn resourcemanager -format-state-store'
fi
if [ $me -eq 0 -o $me -eq 1 ]
then
 su - hdfs -c '$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc'
 su - hdfs -c '$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode'
fi
su - hdfs -c '$HADOOP_HOME/sbin/hadoop-daemon.sh start datanode'
if [ $me -eq 1 -o $me -eq 2 ]
then
 su - yarn -c '/usr/hdp/current/hadoop-yarn-nodemanager/sbin/yarn-daemon.sh start resourcemanager'
fi
su - yarn -c '/usr/hdp/current/hadoop-yarn-nodemanager/sbin/yarn-daemon.sh start nodemanager'
waitfor $n0 8020
waitfor $n1 8020
su - hdfs -c 'hdfs dfsadmin -safemode wait'
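# once HDFS leaves safe mode, node 1 creates the shared HDFS directories (first boot only) and starts the job history server and HBase master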
if [ $me -eq 1 ]
then
 if [ "$setupdone" = "" ]
 then
  su - hdfs -c 'hdfs dfs -mkdir -p /mr-history/tmp'
  su - hdfs -c 'hdfs dfs -chmod -R 1777 /mr-history/tmp'
  su - hdfs -c 'hdfs dfs -mkdir -p /mr-history/done'
  su - hdfs -c 'hdfs dfs -chmod -R 1777 /mr-history/done'
  su - hdfs -c 'hdfs dfs -chown -R mapred:hdfs /mr-history'
  su - hdfs -c 'hdfs dfs -mkdir -p /app-logs'
  su - hdfs -c 'hdfs dfs -chmod -R 1777 /app-logs'
  su - hdfs -c 'hdfs dfs -chown yarn:hdfs /app-logs'
  su - hdfs -c 'hdfs dfs -mkdir -p /apps/hbase/staging /apps/hbase/data'
  su - hdfs -c 'hdfs dfs -chown hbase:hdfs /apps/hbase/staging /apps/hbase/data'
  su - hdfs -c 'hdfs dfs -chmod 711 /apps/hbase/staging'
  su - hdfs -c 'hdfs dfs -chmod 755 /apps/hbase/data'
  su - hdfs -c 'hdfs dfs -chown hdfs:hdfs /apps/hbase'
  su - hdfs -c 'hdfs dfs -mkdir -p /user/yarn'
  su - hdfs -c 'hdfs dfs -chown yarn:yarn /user/yarn'
  su - hdfs -c 'hdfs dfs -mkdir -p /cdap/tx.snapshot'
  su - hdfs -c 'hdfs dfs -chown yarn:yarn /cdap /cdap/tx.snapshot'
  su - hdfs -c 'hdfs dfs -mkdir -p /user/hive /apps/hive/warehouse /tmp/hive'
  su - hdfs -c 'hdfs dfs -chown -R hive:hadoop /user/hive /apps/hive /tmp/hive'
  su - hdfs -c 'hdfs dfs -chmod -R 775 /apps/hive'
  su - hdfs -c 'hdfs dfs -chmod -R 777 /tmp/hive'
 fi
 su - mapred -c '/usr/hdp/current/hadoop-mapreduce-historyserver/sbin/mr-jobhistory-daemon.sh start historyserver'
 su - hbase -c '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh start master'
fi
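# wait until the HBase master has registered in ZooKeeper before starting this node's regionserver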
while [ "" != "$( echo get /hbase/master | hbase zkcli 2>&1 | grep 'Node does not exist: /hbase/master')" ]
do
 echo Waiting for hbase master to come up
 sleep 30
done
su - hbase -c '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh start regionserver'

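# node 2 initializes the Hive metastore schema (first boot only), starts the metastore, registers a CDAP health check with the local Consul agent, and starts the CDAP services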
if [ $me -eq 2 ]
then
 if [ "$setupdone" = "" ]
 then
  su - hive -c '/usr/hdp/current/hive-metastore/bin/schematool -initSchema -dbType mysql'
 fi
 su - hive -c 'nohup /usr/hdp/current/hive-metastore/bin/hive --service metastore >>/var/log/hive/hive.out 2>>/var/log/hive/hive.log </dev/null &'
 (cd /bin; wget https://raw.githubusercontent.com/caskdata/cdap-monitoring-tools/develop/nagios/check_cdap/bin/check_cdap)
 chmod 755 /bin/check_cdap
 wget -qO- $CODE_SOURCE/${CODE_VERSION}/cloud_init/instconsulagentub16.sh >/tmp/cinst.sh
 bash /tmp/cinst.sh <<!EOF
{
 "bind_addr": "0.0.0.0",
 "client_addr": "0.0.0.0",
 "advertise_addr": "$n2",
 "data_dir": "/opt/consul/data",
 "datacenter": "$DATACENTER",
 "http_api_response_headers": {
  "Access-Control-Allow-Origin": "*"
 },
 "rejoin_after_leave": true,
 "server": false,
 "ui": false,
 "enable_syslog": true,
 "log_level": "info",
 "service": {
  "id": "$REGISTERED_NAME",
  "name": "$REGISTERED_NAME",
  "address": "$N2",
  "port": 11015,
  "checks": [
   {
    "script": "/bin/check_cdap",
    "interval": "60s"
   }
  ]
 }
}
!EOF
 for i in $(cd /etc/init.d; echo *cdap*)
 do
  service $i start
 done
fi

if [ "$setupdone" = "" ]
then
 echo setupdone=true >>/etc/clustermembers
fi