CentOS 설치 hadoop-2.2.0

    :  jdk

1.  hadoop:hadoop-2.2.0.tar.gz
	  :http://mirrors.cnnic.cn/apache/hadoop/common/
2. hadoop      /usr  
	[root@localhost usr]# tar -xzvf hadoop-2.2.0.tar.gz
3.     hadoop 
	[root@localhost usr]# adduser hadoop
4.   ssh                    
[root@localhost ~]# ssh-keygen -t  rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
a8:7a:3e:f6:92:85:b8:c7:be:d9:0e:45:9c:d1:36:3b [email protected]
[root@localhost ~]# 
[root@localhost ~]# cd ..
[root@localhost /]# cd root
[root@localhost ~]# ls
anaconda-ks.cfg  Desktop  install.log  install.log.syslog
[root@localhost ~]# cd .ssh
[root@localhost .ssh]# cat id_rsa.pub > authorized_keys
[root@localhost .ssh]# 

[root@localhost .ssh]# ssh localhost
The authenticity of host 'localhost (127.0.0.1)' can't be established.
RSA key fingerprint is 41:c8:d4:e4:60:71:6f:6a:33:6a:25:27:62:9b:e3:90.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'localhost' (RSA) to the list of known hosts.
Last login: Tue Jun 21 22:40:31 2011
[root@localhost ~]# 

5.            exportENV.sh
export HADOOP_PREFIX="/usr/hadoop-2.2.0"
export PATH=$PATH:$HADOOP_PREFIX/bin
export PATH=$PATH:$HADOOP_PREFIX/sbin

export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}
export HADOOP_COMMON_HOME=${HADOOP_PREFIX}
export HADOOP_HDFS_HOME=${HADOOP_PREFIX}
export YARN_HOME=${HADOOP_PREFIX}

6.  java       /root/.bashrc   
vim /root/.bashrc
export JAVA_HOME=/usr/lib/jvm/jre-1.7.0-openjdk/
export PATH=${JAVA_HOME}/lib:${PATH}

               
source /root/.bashrc

7.         :/usr/hadoop-2.2.0/etc/hadoop                  ,             xxx.template     ,                .template      

7.1.   core-site.xml

<configuration>
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://localhost:8020</value>
		<final>true</final>
	</property>
</configuration>

7.2.  hdfs-site.xml

<configuration>
	<property>
		<name>dfs.namenode.name.dir</name>
		<value>file:/home/hadoop/workspace/hadoop_space/hadoop23/dfs/name</value>
		<final>true</final>
	</property>

	<property>
		<name>dfs.datanode.data.dir</name>
		<value>file:/home/hadoop/workspace/hadoop_space/hadoop23/dfs/data</value>
		<final>true</final>
	</property>

	<property>
		<name>dfs.replication</name>
		<value>1</value>
	</property>

	<property>
			<name>dfs.permissions</name>
			<value>false</value>
	</property>

</configuration>

7.3.   mapred-site.xml

<configuration>
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>

	<property>
		<name>mapred.system.dir</name>
		<value>file:/home/hadoop/workspace/hadoop_space/hadoop23/mapred/system</value>
		<final>true</final>
	</property>

	<property>
		<name>mapred.local.dir</name>
		<value>file:/home/hadoop/workspace/hadoop_space/hadoop23/mapred/local</value>
		<final>true</final>
	</property>

</configuration>


7.4.  yarn-site.xml

<configuration>
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
	<property>
		<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
		<value>org.apache.hadoop.mapred.ShuffleHandler</value>
	</property>

</configuration> 

8.    namenode     (/usr/hadoop-2.2.0/) 
# ./bin/hdfs namenode -format

9.      

# ./sbin/hadoop-daemon.sh start namenode

# ./sbin/hadoop-daemon.sh start datanode

      :

# ./sbin/start-dfs.sh

10.   Yarn     

# ./sbin/yarn-daemon.sh start resourcemanager

# ./sbin/yarn-daemon.sh start nodemanager

     :

# start-yarn.sh

11.          

# jps


2539 NameNode
2744 NodeManager
3075 Jps
3030 DataNode
2691 ResourceManager

12.  UI

   localhost:8088         


	

좋은 웹페이지 즐겨찾기