Hadoop Cluster Challenge (2) - HA Setup

code_able · February 10, 2023

Last time, I knocked out the StandAlone setup without much trouble.
Now let's build a distributed cluster.
Doing this nearly distributed my brain.

The HA challenge begins

There are four servers in total.
The plan is to lay out the roles as follows.
master

  • zookeeper
  • active namenode
  • datanode
  • journalnode
  • active resourcemanager

slave1

  • zookeeper
  • standby namenode
  • datanode
  • journalnode
  • standby resourcemanager

slave2

  • zookeeper
  • datanode
  • journalnode

slave3

  • datanode

Running the base container

sudo docker run -ti --name hadoop-base ubuntu:20.04 /bin/bash
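
The original post doesn't show it at this point (Java and Hadoop themselves were presumably installed as in the StandAlone post), but the later steps assume Java, an SSH server, wget, and vi exist inside the container. A minimal sketch, assuming Ubuntu 20.04 package names:

# inside the hadoop-base container: packages the later steps rely on
apt-get update
apt-get install -y openjdk-8-jdk openssh-server wget vim
# /usr/sbin/sshd won't start without its privilege-separation directory
mkdir -p /run/sshd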

Installing ZooKeeper

mkdir -p /zookeeper
cd /zookeeper
wget https://dlcdn.apache.org/zookeeper/zookeeper-3.8.0/apache-zookeeper-3.8.0-bin.tar.gz

tar -xvf apache-zookeeper-3.8.0-bin.tar.gz
rm apache-zookeeper-3.8.0-bin.tar.gz

mkdir -p /zookeeper/data

cd /zookeeper/apache-zookeeper-3.8.0-bin/conf

cp zoo_sample.cfg zoo.cfg

vi zoo.cfg
dataDir=/zookeeper/data
maxClientCnxns=60
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
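
For reference, the remaining zoo.cfg settings can stay at the zoo_sample.cfg defaults; the whole file then looks roughly like this (tickTime/initLimit/syncLimit/clientPort below are the sample defaults, not values taken from the original post):

# /zookeeper/apache-zookeeper-3.8.0-bin/conf/zoo.cfg
tickTime=2000                    # sample default
initLimit=10                     # sample default
syncLimit=5                      # sample default
clientPort=2181                  # sample default; matches the 2181 ports used throughout
dataDir=/zookeeper/data
maxClientCnxns=60
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888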

core-site.xml

<property>
        <name>fs.defaultFS</name>
        <value>hdfs://master</value>
</property>

<property>
        <name>ha.zookeeper.quorum</name>
        <value>master:2181,slave1:2181,slave2:2181</value>
</property>
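
These property blocks (and the hdfs-site.xml / yarn-site.xml ones below) go inside the <configuration> element of the corresponding file under /hadoop_home/hadoop-2.7.7/etc/hadoop/. Note that fs.defaultFS points at the HA nameservice name (master, defined as dfs.nameservices below), not at a single host. As a sketch, the full core-site.xml looks like:

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://master</value>
        </property>
        <property>
                <name>ha.zookeeper.quorum</name>
                <value>master:2181,slave1:2181,slave2:2181</value>
        </property>
</configuration>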

hdfs-site.xml

mkdir -p /hadoop_home/namenode
mkdir -p /hadoop_home/datanode
mkdir -p /hadoop_home/journalnode

<property>
        <name>dfs.replication</name>
        <value>4</value>
</property>
<property>
        <name>dfs.namenode.name.dir</name>
        <value>/hadoop_home/namenode</value>
</property>
<property>
        <name>dfs.datanode.data.dir</name>
        <value>/hadoop_home/datanode</value>
</property>
<property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/hadoop_home/journalnode</value>
</property>
<property>
        <name>dfs.nameservices</name>
        <value>master</value>
</property>
<property>
        <name>dfs.ha.namenodes.master</name>
        <value>nn1,nn2</value>
</property>
<property>
        <name>dfs.namenode.rpc-address.master.nn1</name>
        <value>master:8020</value>
</property>
<property>
        <name>dfs.namenode.rpc-address.master.nn2</name>
        <value>slave1:8020</value>
</property>
<property>
        <name>dfs.namenode.http-address.master.nn1</name>
        <value>master:50070</value>
</property>
<property>
        <name>dfs.namenode.http-address.master.nn2</name>
        <value>slave1:50070</value>
</property>
<property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://master:8485;slave1:8485;slave2:8485/hadoop-cluster</value>
</property>
<property>
        <name>dfs.client.failover.proxy.provider.master</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
       <name>dfs.ha.fencing.methods</name>
       <value>shell(/bin/true)</value>
</property>
<property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
</property>
<property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
</property>
<property>
    <name>dfs.namenode.http-address</name>
    <value>0.0.0.0:50070</value>
</property>

yarn-site.xml

mkdir -p /hadoop_home/yarn
mkdir -p /hadoop_home/yarn/nm-local-dir
mkdir -p /hadoop_home/yarn/system
mkdir -p /hadoop_home/yarn/system/rmstore

<property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
</property>
<property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
        <name>yarn.nodemanager.local-dirs</name>
        <value>/hadoop_home/yarn/nm-local-dir</value>
</property>
<property>
        <name>yarn.resourcemanager.fs.state-store.uri</name>
        <value>/hadoop_home/yarn/system/rmstore</value>
</property>
<property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
</property>
<property>
        <name>yarn.web-proxy.address</name>
        <value>0.0.0.0:8089</value>
</property>
<property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
</property>
<property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>clusrm</value>
</property>
<property>
       <name>yarn.resourcemanager.ha.rm-ids</name>
       <value>rm1,rm2</value>
</property>
<property>
       <name>yarn.resourcemanager.hostname.rm1</name>
       <value>master</value>
</property>
<property>
       <name>yarn.resourcemanager.hostname.rm2</name>
       <value>slave1</value>
</property>
<property>
       <name>yarn.resourcemanager.address.rm1</name>
       <value>master:8032</value>
</property>
<property>
       <name>yarn.resourcemanager.address.rm2</name>
       <value>slave1:8032</value>
</property>
<property>
       <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
       <value>master:8031</value>
</property>
<property>
       <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
       <value>slave1:8031</value>
</property>
<property>
       <name>yarn.resourcemanager.admin.address.rm1</name>
       <value>master:8033</value>
</property>
<property>
       <name>yarn.resourcemanager.admin.address.rm2</name>
       <value>slave1:8033</value>
</property>
<property>
       <name>yarn.resourcemanager.scheduler.address.rm1</name>
       <value>master:8030</value>
</property>
<property>
       <name>yarn.resourcemanager.scheduler.address.rm2</name>
       <value>slave1:8030</value>
</property>
<property>
       <name>yarn.resourcemanager.webapp.address.rm1</name>
       <value>master:8088</value>
</property>
<property>
       <name>yarn.resourcemanager.webapp.address.rm2</name>
       <value>slave1:8088</value>
</property>
<property>
       <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
       <value>true</value>
</property>
<property>
       <name>yarn.resourcemanager.zk-address</name>
       <value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
      <name>yarn.nodemanager.resource.memory-mb</name>
      <value>28672</value>
</property>
<property>
      <name>yarn.nodemanager.resource.cpu-vcores</name>
      <value>8</value>
</property>
<property>
      <name>yarn.scheduler.maximum-allocation-mb</name>
      <value>8192</value>
</property>
<property>
      <name>yarn.scheduler.minimum-allocation-mb</name>
      <value>1024</value>
</property>
<property>
      <name>yarn.nodemanager.vmem-pmem-ratio</name>
      <value>2.1</value>
</property>
<property>
      <name>yarn.nodemanager.pmem-check-enabled</name>
      <value>true</value>
</property>
<property>
      <name>yarn.nodemanager.vmem-check-enabled</name>
      <value>false</value>
</property>

Creating the Docker image

sudo docker commit hadoop-base hadoop-base:2.7.7

Running the Docker containers
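
The run commands below assume a user-defined Docker network called hadoop-cluster-net already exists and that all four containers can reach each other on it (on a single Docker host a plain bridge network is enough; across hosts an overlay network or equivalent would be needed). If it doesn't exist yet:

sudo docker network create hadoop-cluster-net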

sudo docker run -ti \
--hostname master \
--name master \
--network hadoop-cluster-net \
-p 50070:50070 \
-p 9083:9083 \
-p 2181:2181 \
-p 8020:8020 \
-p 8033:8033 \
-p 16010:16010 \
-e LC_ALL=ko_KR.UTF-8 \
-e TZ=Asia/Seoul \
hadoop-base:2.7.7 /bin/bash

sudo docker run -ti \
--hostname slave1 \
--name slave1 \
--network hadoop-cluster-net \
-p 50070:50070 \
-p 9083:9083 \
-p 2181:2181 \
-p 8020:8020 \
-p 8033:8033 \
-e LC_ALL=ko_KR.UTF-8 \
-e TZ=Asia/Seoul \
hadoop-base:2.7.7 /bin/bash

sudo docker run -ti \
--hostname slave2 \
--name slave2 \
--network hadoop-cluster-net \
-p 2181:2181 \
-e LC_ALL=ko_KR.UTF-8 \
-e TZ=Asia/Seoul \
hadoop-base:2.7.7 /bin/bash

sudo docker run -ti \
--hostname slave3 \
--name slave3 \
--network hadoop-cluster-net \
-p 2181:2181 \
-e LC_ALL=ko_KR.UTF-8 \
-e TZ=Asia/Seoul \
hadoop-base:2.7.7 /bin/bash

Startup

# set up /etc/hosts on every node (use each node's actual IP in place of 0.0.0.0)
vi /etc/hosts
0.0.0.0     master
0.0.0.0     slave1
0.0.0.0     slave2
0.0.0.0     slave3

# set up the slaves file
vi /hadoop_home/hadoop-2.7.7/etc/hadoop/slaves
master
slave1
slave2
slave3

# start the ssh daemon on every node
master /usr/sbin/sshd
slave1 /usr/sbin/sshd
slave2 /usr/sbin/sshd
slave3 /usr/sbin/sshd
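
start-dfs.sh / start-yarn.sh at the end drive the other nodes over SSH, so the master needs passwordless SSH to every host in the slaves file. The original post doesn't show that step; a minimal sketch, assuming everything runs as root and sshd permits root login:

ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa      # key without a passphrase
for h in master slave1 slave2 slave3; do
    ssh-copy-id root@$h                       # copy the public key to each node
done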

# set the zookeeper myid on each ensemble member
master echo 1 > /zookeeper/data/myid
slave1 echo 2 > /zookeeper/data/myid
slave2 echo 3 > /zookeeper/data/myid

# start zookeeper
master /zookeeper/apache-zookeeper-3.8.0-bin/bin/zkServer.sh start
slave1 /zookeeper/apache-zookeeper-3.8.0-bin/bin/zkServer.sh start
slave2 /zookeeper/apache-zookeeper-3.8.0-bin/bin/zkServer.sh start
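
To confirm the ensemble actually formed (one leader, two followers), check the status on each of the three nodes:

master /zookeeper/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status   # expect Mode: leader or follower
slave1 /zookeeper/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status
slave2 /zookeeper/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status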

# initialize the HA state in ZooKeeper (zkfc format)
master /hadoop_home/hadoop-2.7.7/bin/hdfs zkfc -formatZK

# start the journalnodes
master /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start journalnode
slave1 /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start journalnode
slave2 /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start journalnode

# format the namenode
master /hadoop_home/hadoop-2.7.7/bin/hdfs namenode -format

# start the (active) namenode
master /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start namenode

# start the ZKFC (automatic failover controller)
master /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start zkfc

# start the datanodes
master /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start datanode
slave1 /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start datanode
slave2 /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start datanode
slave3 /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start datanode
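
A quick check (not in the original post): at this point dfsadmin should report all four DataNodes as live.

master /hadoop_home/hadoop-2.7.7/bin/hdfs dfsadmin -report   # expect 4 live datanodes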

# bootstrap and start the standby namenode
slave1 /hadoop_home/hadoop-2.7.7/bin/hdfs namenode -bootstrapStandby
slave1 /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start namenode
slave1 /hadoop_home/hadoop-2.7.7/sbin/hadoop-daemon.sh start zkfc
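
With both NameNodes and both ZKFCs running, the active/standby split can be confirmed with hdfs haadmin (nn1/nn2 are the IDs from dfs.ha.namenodes.master):

master /hadoop_home/hadoop-2.7.7/bin/hdfs haadmin -getServiceState nn1   # expect active
master /hadoop_home/hadoop-2.7.7/bin/hdfs haadmin -getServiceState nn2   # expect standby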

# start yarn (start-dfs.sh also brings up any remaining HDFS daemons)
start-dfs.sh
start-yarn.sh

# if that fails, stop everything first and then run the start scripts again
stop-dfs.sh
stop-yarn.sh
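
One caveat (my assumption, not from the original post): in Hadoop 2.7, start-yarn.sh only launches the ResourceManager on the node it is run from, so the standby RM on slave1 typically has to be started separately. Once both are up, the RM HA state and the registered NodeManagers can be checked with rmadmin (rm1/rm2 are the IDs from yarn.resourcemanager.ha.rm-ids):

slave1 /hadoop_home/hadoop-2.7.7/sbin/yarn-daemon.sh start resourcemanager   # standby RM, if start-yarn.sh didn't start it
master /hadoop_home/hadoop-2.7.7/bin/yarn rmadmin -getServiceState rm1       # one of rm1/rm2 should be active
master /hadoop_home/hadoop-2.7.7/bin/yarn rmadmin -getServiceState rm2       # the other standby
master /hadoop_home/hadoop-2.7.7/bin/yarn node -list                         # NodeManagers registered with the active RM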

After an enormous amount of trial and error, it finally worked.
My brain got distributed along the way, too.
