QJM HA setup based on Cloudera CDH 5 Beta 2

Environment:
    JDK 1.7.0_45
    Hadoop: CDH 5 Beta 2, downloaded from http://archive.cloudera.com/cdh5/cdh/5/
    OS: 64-bit CentOS 6.5

As root, add the following environment variables to /etc/profile:
export JAVA_HOME=/usr/java/jdk1.7.0_45
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export HADOOP_HOME=/usr/hadoop/chd5
export HADOOP_PID_DIR=/usr/hadoop/hadoop_pid_dir
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export HADOOP_YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HADOOP_COMMON_LIB_NATIVE_DIR=/usr/hadoop/chd5
export ZOOKEEPER_HOME=/usr/hadoop/zookeeper
export PATH=${JAVA_HOME}/bin:${ZOOKEEPER_HOME}/bin:$PATH
Run source /etc/profile so the variables take effect in the current shell.
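A quick sanity check that the environment is wired up (this assumes the CDH 5 tarball is already unpacked at /usr/hadoop/chd5 and the JDK at /usr/java/jdk1.7.0_45; note that the PATH above does not add ${HADOOP_HOME}/bin, so the hadoop command is given with its full path):
java -version                          # should report 1.7.0_45
echo $HADOOP_HOME                      # should print /usr/hadoop/chd5
${HADOOP_HOME}/bin/hadoop version      # should report the CDH 5 build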
Cluster layout:
master   192.168.1.10   namenode, JournalNode
master2  192.168.1.9    namenode, JournalNode
slave1   192.168.1.11   datanode, JournalNode
slave2   192.168.1.12   datanode
slave3   192.168.1.13   datanode
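The configuration below refers to the nodes by hostname, so every node must be able to resolve these names; a minimal /etc/hosts sketch for that (an assumption; skip it if you already use DNS):
192.168.1.10  master
192.168.1.9   master2
192.168.1.11  slave1
192.168.1.12  slave2
192.168.1.13  slave3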
core-site.xml  》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》
<configuration>
        <property>  
                <name>fs.defaultFS</name>  
                <value>hdfs://cluster</value>
                <!-- fs.defaultFS points at the logical HDFS nameservice rather than a single host.
                     There are two NameNodes and either one may be active at any moment, so clients
                     address the nameservice name and are routed to whichever NameNode is currently active. -->
        </property>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/usr/hadoop/tmp</value>
                <!-- create the tmp directory under /usr/hadoop in advance -->
                <description>A base for other temporary directories.</description>
        </property>
        <property>
                <name>dfs.name.dir</name>
                <value>/usr/hadoop/hdfs/name</value>
                <!-- create the hdfs directory, with name and data subdirectories, under /usr/hadoop in advance -->
        </property>
        <property>  
                <name>fs.trash.interval</name>  
                <value>10080</value>  
         </property>
         <property>  
                <name>fs.trash.checkpoint.interval</name>  
                <value>10080</value>  
         </property> 
         <property>  
                <name>topology.script.file.name</name>  
                <value>/usr/hadoop/chd5/etc/hadoop/rack.py</value>
                <!-- path to the rack awareness script; see rack.py below -->
         </property>
         <property>  
                <name>topology.script.number.args</name>  
                <value>6</value>  
         </property>
         <property>   
                <name>hadoop.native.lib</name>   
                <value>false</value>   
                <description>Should native hadoop libraries, if present, be used.</description>   
         </property> 
         <property>  
                <name>hadoop.security.group.mapping</name>  
                <value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value> 
         </property>      

         <property>  
                <name>hadoop.proxyuser.hadoop.hosts</name>  
                <value>*</value>  
         </property>  

         <property>  
                <name>hadoop.proxyuser.hadoop.groups</name>  
                <value>*</value>  
         </property>  
</configuration>
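Once the cluster is running (see the startup steps at the end), clients can address HDFS through the logical nameservice instead of a specific NameNode host, for example (the path here is only illustrative):
${HADOOP_HOME}/bin/hdfs dfs -mkdir -p hdfs://cluster/tmp
${HADOOP_HOME}/bin/hdfs dfs -ls hdfs://cluster/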
hdfs-site.xml  》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》
<configuration>
        <property>  
               <name>dfs.replication</name>  
               <value>3</value>  
        </property>
        <property>  
                <name>dfs.blocksize</name>  
                <value>16m</value> 
         </property>

         <property>
                <name>dfs.data.dir</name>
                <value>/usr/hadoop/hdfs/data</value>
        </property>
        <property>
                <name>dfs.nameservices</name>
                <value>cluster</value>
        </property>
        <property>
                <name>dfs.ha.namenodes.cluster</name>
                <value>master,master2</value>
        </property>
        <property>
                <name>dfs.namenode.rpc-address.cluster.master</name>
                <value>master:9000</value>
         </property>
         <property>
                <name>dfs.namenode.rpc-address.cluster.master2</name>
                <value>master2:9000</value>
         </property>
         <property>  
                <name>dfs.namenode.http-address.cluster.master</name>  
                <value>master:50070</value>  
          </property>
         <property>
                <name>dfs.namenode.http-address.cluster.master2</name>
                <value>master2:50070</value>
          </property>

          <property>  
                <name>dfs.namenode.secondary.http-address.cluster.master</name>  
                <value>master:50090</value>  
          </property>
          <property>
                <name>dfs.namenode.secondary.http-address.cluster.master2</name>
                <value>master2:50090</value>
           </property>
           <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://master:8485;master2:8485;slave1:8485/cluster</value>
        </property>
        <property>
                <name>dfs.client.failover.proxy.provider.cluster</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
         <property>
                 <name>ha.zookeeper.quorum</name>
                 <value>master:2181,slave1:2181,slave2:2181,slave3:2181,master2:2181</value>
         </property>
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value>sshfence</value>
        </property>
        <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/home/hadoop/.ssh/id_rsa</value>
        </property>
        <property>
                <name>dfs.journalnode.edits.dir</name>
                <value>/usr/hadoop/tmp/journal</value>
        </property>
        <property>
           <name>dfs.ha.automatic-failover.enabled</name>
              <value>true</value>
        </property>
        <property>
                <name>dfs.permissions</name>
                <value>false</value>
        </property>
        <property>  
                <name>dfs.webhdfs.enabled</name>  
                <value>true</value>  
        </property>

        <property>  
                <name>dfs.datanode.max.xcievers</name>  
                <value>1000000</value>  
        </property> 

        <property>  
                <name>dfs.balance.bandwidthPerSec</name>  
                <value>104857600</value>  
                <description>  
                      Specifies the maximum amount of bandwidth that each datanode can utilize for the balancing purpose, in the number of bytes per second.
                </description>  
       </property> 
       <property>  
                <name>dfs.hosts.exclude</name>  
                <value>/usr/hadoop/chd5/etc/hadoop/excludes</value>  
                <description>
                        Names a file that contains a list of hosts that are
                        not permitted to connect to the namenode. The full pathname of the
                        file must be specified. If the value is empty, no hosts are excluded.
                </description>
        </property>
</configuration>
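The configuration above references several local paths; a sketch of preparing them up front on the relevant nodes (some daemons create these on their own, but creating them ahead of time avoids permission surprises):
mkdir -p /usr/hadoop/tmp /usr/hadoop/tmp/journal
mkdir -p /usr/hadoop/hdfs/name /usr/hadoop/hdfs/data
touch /usr/hadoop/chd5/etc/hadoop/excludes    # empty decommission list referenced by dfs.hosts.exclude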
mapred-site.xml   》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》
<configuration>
        <property>
                <name>mapreduce.framework.name</name>  
                <value>yarn</value>  
        </property> 
        <property>  
                <name>mapreduce.jobhistory.address</name>  
                <value>master:10020</value>  
        </property>
        <property>  
                <name>mapreduce.jobhistory.webapp.address</name>  
                <value>master:19888</value>  
        </property>
        <property>  
                <name>mapreduce.output.fileoutputformat.compress</name>  
                <value>true</value>  
        </property>  
        <property>  
                <name>mapreduce.output.fileoutputformat.compress.type</name>  
                <value>BLOCK</value>  
        </property>  
        <property>  
                <name>mapreduce.output.fileoutputformat.compress.codec</name>  
                <value>org.apache.hadoop.io.compress.SnappyCodec</value>  
        </property>  
        <property>  
                <name>mapreduce.map.output.compress</name>  
                <value>true</value>  
        </property>  
        <property>  
                 <name>mapreduce.map.output.compress.codec</name>  
                 <value>org.apache.hadoop.io.compress.SnappyCodec</value>  
        </property>
</configuration>
yarn-site.xml  》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》
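A minimal yarn-site.xml sketch for this layout, assuming the ResourceManager runs on master and the NodeManagers provide the MapReduce shuffle service (these values are assumptions, not part of the original setup; adjust to your cluster):
<configuration>
        <property>
                <name>yarn.resourcemanager.hostname</name>
                <value>master</value>
        </property>
        <property>
                <!-- NodeManagers must run the shuffle service for MapReduce on YARN -->
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
                <value>org.apache.hadoop.mapred.ShuffleHandler</value>
        </property>
</configuration>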
rack.py》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》
#!/usr/bin/env python
# Rack awareness script: maps the host names / IPs passed as arguments to rack paths,
# using the rack.data file that sits in the same directory as this script.

import sys, os, time

pwd = os.path.realpath(__file__)
rack_file = os.path.dirname(pwd) + "/rack.data"

# each line of rack.data: one or more hosts followed by the rack path
rack_list = [l.strip().split() for l in open(rack_file).readlines() if len(l.strip().split()) > 1]
rack_map = {}
for item in rack_list:
        for host in item[:-1]:
                rack_map[host] = item[-1]
# fall back to /default/rack when rack.data has no "default" entry
rack_map['default'] = 'default' in rack_map and rack_map['default'] or '/default/rack'
rack_result = [av in rack_map and rack_map[av] or rack_map['default'] for av in sys.argv[1:]]
#print rack_map, rack_result
print ' '.join(rack_result)

# log each invocation for debugging
f = open('/tmp/rack.log', 'a+')
f.writelines("[%s] %s\n" % (time.strftime("%F %T"), str(sys.argv)))
f.close()
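The script expects a rack.data file in the same directory, one line per mapping: one or more hostnames/IPs followed by a rack path, plus an optional default entry. A hypothetical rack.data for this cluster (the rack paths are made up for illustration):
192.168.1.10 master  /dc1/rack1
192.168.1.9  master2 /dc1/rack1
192.168.1.11 slave1  /dc1/rack2
192.168.1.12 slave2  /dc1/rack2
192.168.1.13 slave3  /dc1/rack2
default /dc1/rack1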
zookeeper  zoo.cfg  》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》
On every ZooKeeper node, create a data directory under the zookeeper directory and put a myid file in it whose content is that node's server id (1, 2, 3, 4 or 5), matching the server.1, server.2, … entries below.
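A shell sketch of that step (ZooKeeper is assumed to be installed at /usr/hadoop/zookeeper on every node; write the id that matches the node's server.N entry):
mkdir -p /usr/hadoop/zookeeper/data
echo 1 > /usr/hadoop/zookeeper/data/myid   # 1 on master, 2 on master2, 3 on slave1, 4 on slave2, 5 on slave3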
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/usr/hadoop/zookeeper/data
# the port at which the clients will connect
clientPort=2181
server.1=master:2888:3888
server.2=master2:2888:3888   
server.3=slave1:2888:3888 
server.4=slave2:2888:3888
server.5=slave3:2888:3888      
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1

Pay strict attention to the order of the commands below; do not swap the steps!
1. On master, master2 and slave1, run: hadoop-daemon.sh start journalnode
2. Start ZooKeeper on all five nodes by running: zkServer.sh start
3. On master, run: hdfs namenode -format
   Then, still on master, run: hadoop-daemon.sh start namenode
4. On master2, run: hdfs namenode -bootstrapStandby
   Then, on master2, run: hadoop-daemon.sh start namenode
At this point both http://192.168.1.9:50070 and http://192.168.1.10:50070 show the NameNodes in standby state.
5. Format the HA state in ZooKeeper by running:
hdfs zkfc -formatZK

6. On either NameNode node, run the HDFS start script (start-dfs.sh); one NameNode switches from standby to active and the other stays in standby.
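A quick way to confirm the HA state (hdfs haadmin is standard in Hadoop 2.x; the NameNode ids master and master2 come from dfs.ha.namenodes.cluster above):
${HADOOP_HOME}/bin/hdfs haadmin -getServiceState master    # expect active or standby
${HADOOP_HOME}/bin/hdfs haadmin -getServiceState master2   # expect the other state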
If you have other opinions or views, please leave your valuable comments so the post can be corrected in time and we can all improve together.
