# ============LICENSE_START====================================================
# org.onap.dcae
# =============================================================================
# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
# =============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END======================================================

set -x
#
# get configuration
#
CODE_SOURCE=$1
CODE_VERSION=$2
CLUSTER_INDEX=$3
CLUSTER_SIZE=$4
CLUSTER_FQDNS=$5
CLUSTER_LOCAL_IPS=$6
CLUSTER_FLOATING_IPS=$7
DATACENTER=$8
REGISTERED_NAME=$9
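# CODE_SOURCE/CODE_VERSION: base URL and version used later to fetch helper scripts
# CLUSTER_INDEX/CLUSTER_SIZE: this node's index and the total node count
# CLUSTER_FQDNS/CLUSTER_LOCAL_IPS/CLUSTER_FLOATING_IPS: comma-separated per-node lists
# DATACENTER/REGISTERED_NAME: Consul datacenter and service registration name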
export JAVA_HOME=/usr/lib/jvm/default-java
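# derive a MySQL root password from the hash of the pre-staged ssh private key,
# readable only by root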
md5sum /root/.sshkey/id_rsa | awk '{ print $1 }' >/root/.mysqlpw
chmod 400 /root/.mysqlpw
#
# enable outside apt repositories
#
wget -qO- http://public-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.6.0.3/hdp.list >/etc/apt/sources.list.d/hdp.list
wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/cask.list >/etc/apt/sources.list.d/cask.list
wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/pubkey.gpg | apt-key add -
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 B9733A7A07513CAD
apt-get update
#
# install software from apt repositories
#
apt-get install -y default-jdk hadoop-hdfs hadoop-mapreduce hive hbase libsnappy-dev liblzo2-dev hadooplzo spark-master spark-python zip unzip
usermod -a -G hadoop hive
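# the first three nodes (indexes 0-2) run the ZooKeeper ensemble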
if [ $CLUSTER_INDEX -lt 3 ]
then
apt-get install -y zookeeper-server
cat <<!EOF >>/etc/zookeeper/conf/zookeeper-env.sh
export JAVA_HOME=/usr/lib/jvm/default-java
export ZOOCFGDIR=/etc/zookeeper/conf
export ZOO_LOG_DIR=/var/log/zookeeper
export ZOOPIDFILE=/var/run/zookeeper/zookeeper_server.pid
!EOF
mkdir -p /var/lib/zookeeper
chown zookeeper:zookeeper /var/lib/zookeeper
cp /usr/hdp/current/zookeeper-server/etc/init.d/zookeeper-server /etc/init.d/.
update-rc.d zookeeper-server defaults
service zookeeper-server start
fi
if [ $CLUSTER_INDEX -eq 2 ]
then
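# preseed the MySQL root password so the mysql-server install is non-interactive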
debconf-set-selections <<!
mysql-server mysql-server/root_password password $(cat /root/.mysqlpw)
!
debconf-set-selections <<!
mysql-server mysql-server/root_password_again password $(cat /root/.mysqlpw)
!
apt-get install -y cdap cdap-cli cdap-gateway cdap-kafka cdap-master cdap-security cdap-ui mysql-server mysql-connector-java
set +x
echo + mysql_secure_installation --use-default
mysql_secure_installation --use-default --password=$(cat /root/.mysqlpw)
set -x
mysql_install_db
cp /usr/share/java/mysql-connector-java-*.jar /usr/hdp/current/hive-client/lib/.
mkdir -p /usr/lib/hive/logs
chown -R hive:hadoop /usr/lib/hive
chmod -R 755 /usr/lib/hive
fi
#
# make directories
#
mkdir -p /hadoop/hdfs/journalnode/cl /hadoop/hdfs/namenode /hadoop/hdfs/data /etc/hadoop/conf /hadoop/yarn/local /hadoop/yarn/log /usr/lib/hadoop/logs /usr/lib/hadoop-mapreduce/logs /usr/lib/hadoop-yarn/logs /usr/lib/hbase/logs /etc/cdap/conf
#
# set up config files
#
HDPVER=$(ls /usr/hdp | grep -v current)
echo -Dhdp.version=$HDPVER >/usr/hdp/current/spark-client/conf/java-opts
echo "export OPTS=\"\${OPTS} -Dhdp.version=$HDPVER\"" >>/etc/cdap/conf/cdap-env.sh
cat >/etc/profile.d/hadoop.sh <<'!EOF'
HADOOP_PREFIX=/usr/hdp/current/hadoop-client
HADOOP_YARN_HOME=/usr/hdp/current/hadoop-yarn-nodemanager
HADOOP_HOME=/usr/hdp/current/hadoop-client
HADOOP_COMMON_HOME=$HADOOP_HOME
HADOOP_CONF_DIR=/etc/hadoop/conf
HADOOP_HDFS_HOME=/usr/hdp/current/hadoop-hdfs-namenode
HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec
YARN_LOG_DIR=/usr/lib/hadoop-yarn/logs
HADOOP_LOG_DIR=/usr/lib/hadoop/logs
JAVA_HOME=/usr/lib/jvm/default-java
JAVA=$JAVA_HOME/bin/java
PATH=$PATH:$HADOOP_HOME/bin
HBASE_LOG_DIR=/usr/lib/hbase/logs
HADOOP_MAPRED_LOG_DIR=/usr/lib/hadoop-mapreduce/logs
HBASE_CONF_DIR=/etc/hbase/conf
export HADOOP_PREFIX HADOOP_HOME HADOOP_COMMON_HOME HADOOP_CONF_DIR HADOOP_HDFS_HOME JAVA_HOME PATH HADOOP_LIBEXEC_DIR JAVA YARN_LOG_DIR HADOOP_LOG_DIR HBASE_LOG_DIR HADOOP_MAPRED_LOG_DIR HBASE_CONF_DIR
!EOF
chmod 755 /etc/profile.d/hadoop.sh
cat </etc/profile.d/hadoop.sh >>/etc/hadoop/conf/hadoop-env.sh
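# give the hdfs account the pre-staged ssh key pair; the HDFS HA config below
# uses it for sshfence between the namenodes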
mv /root/.sshkey /var/lib/hadoop-hdfs/.ssh
cp /var/lib/hadoop-hdfs/.ssh/id_rsa.pub /var/lib/hadoop-hdfs/.ssh/authorized_keys
>/etc/hadoop/conf/dfs.exclude
>/etc/hadoop/conf/yarn.exclude
chown -R hdfs:hadoop /var/lib/hadoop-hdfs/.ssh /hadoop /usr/lib/hadoop
chown -R yarn:hadoop /usr/lib/hadoop-yarn /hadoop/yarn
chown -R mapred:hadoop /usr/lib/hadoop-mapreduce
chown -R hbase:hbase /usr/lib/hbase
chmod 700 /var/lib/hadoop-hdfs/.ssh
chmod 600 /var/lib/hadoop-hdfs/.ssh/*
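# drop any packaged maxClientCnxns setting; init.py below appends the cluster zoo.cfg entries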
sed -i -e '/maxClientCnxns/d' /etc/zookeeper/conf/zoo.cfg

cat >/tmp/init.py <<!EOF
import os
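# this file is generated through an unquoted shell heredoc: the CLUSTER_* values
# and HDPVER from the surrounding script are substituted in before it is written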
with open('/root/.mysqlpw', 'r') as f:
    mysqlpw = f.readline().strip()
myid=int('$CLUSTER_INDEX')
count=$CLUSTER_SIZE
fqdns='$CLUSTER_FQDNS'.split(',')
localips='$CLUSTER_LOCAL_IPS'.split(',')
floatingips='$CLUSTER_FLOATING_IPS'.split(',')
with open('/etc/hosts', 'a') as f:
    f.write("\n")
    for index in range(0, count):
        hn=fqdns[index][0: fqdns[index].index('.')]
        f.write("{ip} {fqdn} {hn}\n".format(ip=localips[index],hn=hn,fqdn=fqdns[index]))

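# pxc writes a Hadoop-style XML configuration file at path f from a dict m of
# property name/value pairs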
def pxc(f, m):
    a = "<?xml version='1.0' encoding='UTF-8'?>\n<?xml-stylesheet type='text/xsl' href='configuration.xsl'?>\n<configuration>"
    for n in m.keys():
        a = a + "\n <property>\n <name>{n}</name>\n <value>{v}</value>\n </property>".format(n=n,v=m[n])
    a = a + "\n</configuration>\n"
    with open(f, 'w') as xml:
        xml.write(a)
pxc('/etc/hadoop/conf/core-site.xml', {
    'fs.defaultFS':'hdfs://cl'
})
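# HDFS HA: namenodes nn1/nn2 on the first two nodes, quorum journal and ZooKeeper
# on the first three, automatic failover with sshfence as the fencing method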
pxc('/etc/hadoop/conf/hdfs-site.xml', {
    'dfs.namenode.datanode.registration.ip-hostname-check':'false',
    'dfs.namenode.name.dir':'/hadoop/hdfs/namenode',
    'dfs.hosts.exclude':'/etc/hadoop/conf/dfs.exclude',
    'dfs.datanode.data.dir':'/hadoop/hdfs/data',
    'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode',
    'dfs.nameservices':'cl',
    'dfs.ha.namenodes.cl':'nn1,nn2',
    'dfs.namenode.rpc-address.cl.nn1':localips[0]+':8020',
    'dfs.namenode.rpc-address.cl.nn2':localips[1]+':8020',
    'dfs.namenode.http-address.cl.nn1':localips[0]+':50070',
    'dfs.namenode.http-address.cl.nn2':localips[1]+':50070',
    'dfs.namenode.shared.edits.dir':'qjournal://'+localips[0]+':8485;'+localips[1]+':8485;'+localips[2]+':8485/cl',
    'dfs.client.failover.proxy.provider.cl':'org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider',
    'dfs.ha.fencing.methods':'sshfence(hdfs),shell(/bin/true)',
    'dfs.ha.fencing.ssh.private-key-files':'/var/lib/hadoop-hdfs/.ssh/id_rsa',
    'dfs.ha.fencing.ssh.connect-timeout':'30000',
    'dfs.ha.automatic-failover.enabled':'true',
    'ha.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181'
})
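# YARN: ResourceManager HA on nodes 1 and 2, recovery state kept in ZooKeeper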
pxc('/etc/hadoop/conf/yarn-site.xml', {
    'yarn.nodemanager.vmem-check-enabled':'false',
    'yarn.application.classpath':'/etc/hadoop/conf,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*',
    'yarn.nodemanager.delete.debug-delay-sec':'43200',
    'yarn.scheduler.minimum-allocation-mb':'512',
    'yarn.scheduler.maximum-allocation-mb':'8192',
    'yarn.nodemanager.local-dirs':'/hadoop/yarn/local',
    'yarn.nodemanager.log-dirs':'/hadoop/yarn/log',
    'yarn.resourcemanager.zk-address':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181',
    'yarn.resourcemanager.ha.enabled':'true',
    'yarn.resourcemanager.ha.rm-ids':'rm1,rm2',
    'yarn.resourcemanager.hostname.rm1':localips[1],
    'yarn.resourcemanager.hostname.rm2':localips[2],
    'yarn.resourcemanager.cluster-id':'cl',
    'yarn.resourcemanager.recovery-enabled':'true',
    'yarn.resourcemanager.store.class':'org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore',
    'yarn.resourcemanager.nodes.exclude-path':'/etc/hadoop/conf/yarn.exclude'
})
pxc('/etc/hadoop/conf/mapred-site.xml', {
    'mapreduce.application.classpath':'/etc/hadoop/conf,/usr/lib/hadoop/lib/*,/usr/lib/hadoop/*,/usr/hdp/current/hadoop-hdfs-namenode/,/usr/hdp/current/hadoop-hdfs-namenode/lib/*,/usr/hdp/current/hadoop-hdfs-namenode/*,/usr/hdp/current/hadoop-yarn-nodemanager/lib/*,/usr/hdp/current/hadoop-yarn-nodemanager/*,/usr/hdp/current/hadoop-mapreduce-historyserver/lib/*,/usr/hdp/current/hadoop-mapreduce-historyserver/*',
    'mapreduce.jobhistory.intermediate-done-dir':'/mr-history/tmp',
    'mapreduce.jobhistory.done-dir':'/mr-history/done',
    'mapreduce.jobhistory.address':localips[1],
    'mapreduce.jobhistory.webapp.address':localips[1]
})
pxc('/etc/hbase/conf/hbase-site.xml', {
    'hbase.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181',
    'hbase.rootdir':'hdfs://cl/apps/hbase/data',
    'hbase.cluster.distributed':'true'
})
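# Hive: metastore on node 2, backed by the local MySQL instance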
pxc('/etc/hive/conf/hive-site.xml', {
    'fs.file.impl.disable.cache':'true',
    'fs.hdfs.impl.disable.cache':'true',
    'hadoop.clientside.fs.operations':'true',
    'hive.auto.convert.join.noconditionaltask.size':'1000000000',
    'hive.auto.convert.sortmerge.join.noconditionaltask':'true',
    'hive.auto.convert.sortmerge.join':'true',
    'hive.enforce.bucketing':'true',
    'hive.enforce.sorting':'true',
    'hive.mapjoin.bucket.cache.size':'10000',
    'hive.mapred.reduce.tasks.speculative.execution':'false',
    'hive.metastore.cache.pinobjtypes':'Table,Database,Type,FieldSchema,Order',
    'hive.metastore.client.socket.timeout':'60s',
    'hive.metastore.local':'true',
    'hive.metastore.uris':'thrift://' + fqdns[2] + ':9083',
    'hive.metastore.warehouse.dir':'/apps/hive/warehouse',
    'hive.optimize.bucketmapjoin.sortedmerge':'true',
    'hive.optimize.bucketmapjoin':'true',
    'hive.optimize.mapjoin.mapreduce':'true',
    'hive.optimize.reducededuplication.min.reducer':'1',
    'hive.security.authorization.manager':'org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider',
    'hive.semantic.analyzer.factory.impl':'org.apache.hivealog.cli.HCatSemanticAnalyzerFactory',
    'javax.jdo.option.ConnectionDriverName':'com.mysql.jdbc.Driver',
    'javax.jdo.option.ConnectionPassword': mysqlpw,
    'javax.jdo.option.ConnectionURL':'jdbc:mysql://localhost:3306/metastore?createDatabaseIfNotExist=true',
    'javax.jdo.option.ConnectionUserName':'root'
})
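# CDAP runs only on node 2; its router and Kafka broker bind to that node's local IP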
if myid == 2:
    pxc('/etc/cdap/conf/cdap-site.xml', {
        'zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181/\${root.namespace}',
        'router.server.address':localips[2],
        'explore.enabled':'true',
        'enable.unrecoverable.reset':'true',
        'kafka.seed.brokers':localips[2] + ':9092',
        'app.program.jvm.opts':'-XX:MaxPermSize=128M \${twill.jvm.gc.opts} -Dhdp.version=$HDPVER -Dspark.yarn.am.extraJavaOptions=-Dhdp.version=$HDPVER'
    })
with open('/etc/hbase/conf/regionservers', 'w') as f:
    for ip in localips:
        f.write('{ip}\n'.format(ip=ip))
with open('/etc/hbase/conf/hbase-env.sh', 'a') as f:
    f.write("export HBASE_MANAGES_ZK=false\n")
with open('/etc/zookeeper/conf/zoo.cfg', 'a') as f:
    f.write("server.1={L1}:2888:3888\nserver.2={L2}:2888:3888\nserver.3={L3}:2888:3888\nmaxClientCnxns=0\nautopurge.purgeInterval=6\n".format(L1=localips[0],L2=localips[1],L3=localips[2]))
with open('/etc/clustermembers', 'w') as f:
    f.write("export me={me}\n".format(me=myid))
    for idx in range(len(localips)):
        f.write("export n{i}={ip}\n".format(i=idx, ip=localips[idx]))
        f.write("export N{i}={ip}\n".format(i=idx, ip=floatingips[idx]))
with open('/etc/hadoop/conf/slaves', 'w') as f:
    for idx in range(len(localips)):
        if idx != myid:
            f.write("{x}\n".format(x=localips[idx]))
if myid < 3:
    with open('/var/lib/zookeeper/myid', 'w') as f:
        f.write("{id}".format(id=(myid + 1)))
    os.system('service zookeeper-server restart')
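# pre-accept every node's host key so the hdfs user's later ssh (sshfence) does not prompt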
for ip in localips:
    os.system("su - hdfs -c \"ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 {ip} echo Connectivity to {ip} verified\"".format(ip=ip))
!EOF

python /tmp/init.py

. /etc/clustermembers
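# waitfor polls a TCP port with nc until the given host accepts connections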
waitfor() {
while ( ! nc $1 $2 </dev/null )
do
echo waiting for $1 port $2
sleep 30
done
}
# journal nodes are on port 8485
if [ $me -lt 3 ]
then
su - hdfs -c '$HADOOP_HOME/sbin/hadoop-daemon.sh start journalnode'
waitfor $n0 8485
waitfor $n1 8485
waitfor $n2 8485
fi
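# first boot only: node 0 formats the namenode and the ZooKeeper failover state,
# node 1 then bootstraps the standby namenode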
if [ $me -eq 0 -a "$setupdone" = "" ]
then
su - hdfs -c 'hdfs namenode -format -nonInteractive'
su - hdfs -c 'hdfs zkfc -formatZK'
fi
if [ $me -eq 1 -a "$setupdone" = "" ]
then
waitfor $n0 8020
su - hdfs -c 'hdfs namenode -bootstrapStandby -nonInteractive'
su - yarn -c 'yarn resourcemanager -format-state-store'
fi
if [ $me -eq 0 -o $me -eq 1 ]
then
su - hdfs -c '$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc'
su - hdfs -c '$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode'
fi
su - hdfs -c '$HADOOP_HOME/sbin/hadoop-daemon.sh start datanode'
if [ $me -eq 1 -o $me -eq 2 ]
then
su - yarn -c '/usr/hdp/current/hadoop-yarn-nodemanager/sbin/yarn-daemon.sh start resourcemanager'
fi
su - yarn -c '/usr/hdp/current/hadoop-yarn-nodemanager/sbin/yarn-daemon.sh start nodemanager'
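# wait for both namenodes and for HDFS to leave safe mode before touching the filesystem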
waitfor $n0 8020
waitfor $n1 8020
su - hdfs -c 'hdfs dfsadmin -safemode wait'
if [ $me -eq 1 ]
then
if [ "$setupdone" = "" ]
then
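# one-time HDFS layout for MapReduce history, YARN logs, HBase, CDAP and Hive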
su - hdfs -c 'hdfs dfs -mkdir -p /mr-history/tmp'
su - hdfs -c 'hdfs dfs -chmod -R 1777 /mr-history/tmp'
su - hdfs -c 'hdfs dfs -mkdir -p /mr-history/done'
su - hdfs -c 'hdfs dfs -chmod -R 1777 /mr-history/done'
su - hdfs -c 'hdfs dfs -chown -R mapred:hdfs /mr-history'
su - hdfs -c 'hdfs dfs -mkdir -p /app-logs'
su - hdfs -c 'hdfs dfs -chmod -R 1777 /app-logs'
su - hdfs -c 'hdfs dfs -chown yarn:hdfs /app-logs'
su - hdfs -c 'hdfs dfs -mkdir -p /apps/hbase/staging /apps/hbase/data'
su - hdfs -c 'hdfs dfs -chown hbase:hdfs /apps/hbase/staging /apps/hbase/data'
su - hdfs -c 'hdfs dfs -chmod 711 /apps/hbase/staging'
su - hdfs -c 'hdfs dfs -chmod 755 /apps/hbase/data'
su - hdfs -c 'hdfs dfs -chown hdfs:hdfs /apps/hbase'
su - hdfs -c 'hdfs dfs -mkdir -p /user/yarn'
su - hdfs -c 'hdfs dfs -chown yarn:yarn /user/yarn'
su - hdfs -c 'hdfs dfs -mkdir -p /cdap/tx.snapshot'
su - hdfs -c 'hdfs dfs -chown yarn:yarn /cdap /cdap/tx.snapshot'
su - hdfs -c 'hdfs dfs -mkdir -p /user/hive /apps/hive/warehouse /tmp/hive'
su - hdfs -c 'hdfs dfs -chown -R hive:hadoop /user/hive /apps/hive /tmp/hive'
su - hdfs -c 'hdfs dfs -chmod -R 775 /apps/hive'
su - hdfs -c 'hdfs dfs -chmod -R 777 /tmp/hive'
fi
su - mapred -c '/usr/hdp/current/hadoop-mapreduce-historyserver/sbin/mr-jobhistory-daemon.sh start historyserver'
su - hbase -c '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh start master'
fi
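# every node waits for the HBase master before starting its regionserver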
| 332 | while [ "" != "$( echo get /hbase/master | hbase zkcli 2>&1 | grep 'Node does not exist: /hbase/master')" ] |
| 333 | do |
| 334 | echo Waiting for hbase master to come up |
| 335 | sleep 30 |
| 336 | done |
| 337 | su - hbase -c '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh start regionserver' |
| 338 | |
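# node 2: Hive metastore, CDAP services and a Consul agent that registers the CDAP router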
if [ $me -eq 2 ]
then
if [ "$setupdone" = "" ]
then
su - hive -c '/usr/hdp/current/hive-metastore/bin/schematool -initSchema -dbType mysql'
fi
su - hive -c 'nohup /usr/hdp/current/hive-metastore/bin/hive --service metastore >>/var/log/hive/hive.out 2>>/var/log/hive/hive.log </dev/null &'
(cd /bin; wget https://raw.githubusercontent.com/caskdata/cdap-monitoring-tools/develop/nagios/check_cdap/bin/check_cdap)
chmod 755 /bin/check_cdap
wget -qO- $CODE_SOURCE/${CODE_VERSION}/cloud_init/instconsulagentub16.sh >/tmp/cinst.sh
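# the Consul install script takes the agent configuration JSON on stdin; the service
# entry registers the CDAP router (port 11015) with the check_cdap health check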
bash /tmp/cinst.sh <<!EOF
{
  "bind_addr": "0.0.0.0",
  "client_addr": "0.0.0.0",
  "advertise_addr": "$n2",
  "data_dir": "/opt/consul/data",
  "datacenter": "$DATACENTER",
  "http_api_response_headers": {
    "Access-Control-Allow-Origin": "*"
  },
  "rejoin_after_leave": true,
  "server": false,
  "ui": false,
  "enable_syslog": true,
  "log_level": "info",
  "service": {
    "id": "$REGISTERED_NAME",
    "name": "$REGISTERED_NAME",
    "address": "$N2",
    "port": 11015,
    "checks": [
      {
        "script": "/bin/check_cdap",
        "interval": "60s"
      }
    ]
  }
}
!EOF
for i in $(cd /etc/init.d; echo *cdap*)
do
service $i start
done
fi

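# record that first-boot setup finished so re-runs skip the one-time steps above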
| 384 | if [ "$setupdone" = "" ] |
| 385 | then |
| 386 | echo setupdone=true >>/etc/clustermembers |
| 387 | fi |