hadoop伪分布式部署

作者:Garany 发布于:2018-07-03 分类:破万卷书
Hadoop
1. HDFS
1.1 NameNode 主节点
1.2 DataNode 从节点
1.3 SecondaryNameNode 主节点
2. MapReduce
2.1 JobTracker 主节点
2.2 TaskTracker 从节点

1.配置免密码登录
# ssh-keygen -t rsa
# mv /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
2.安装java
# cd /opt/
# tar zxvf jdk-8u91-linux-x64.tar.gz
# mv jdk1.8.0_91/ java
# vim /etc/profile.d/java.sh
JAVA_HOME=/opt/java
PATH=$PATH:$JAVA_HOME/bin
CLASSPATH=/opt/java/lib/dt.jar:/opt/java/lib/tools.jar
export JAVA_HOME PATH CLASSPATH
# source /etc/profile.d/java.sh
# java -version
3.安装hadoop
# wget http://mirrors.hust.edu.cn/apache/hadoop/common/hadoop-3.1.0/hadoop-3.1.0.tar.gz -P /usr/local/src/
# cd /usr/local/src/
# tar zxvf hadoop-3.1.0.tar.gz 
# mv hadoop-3.1.0 /opt/hadoop
# vim /etc/profile.d/hadoop.sh
export HADOOP_HOME=/opt/hadoop
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
# source /etc/profile.d/hadoop.sh 
# vim /opt/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/opt/java
# hadoop version
4.配置
4.1 core-site.xml
# mkdir -p /data/hadoop
# vim /opt/hadoop/etc/hadoop/core-site.xml
<configuration>
  <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
  </property>
  <property>
        <name>hadoop.tmp.dir</name>
        <value>/data/hadoop/</value>
  </property>
</configuration>
4.2 hdfs-site.xml
# vim /opt/hadoop/etc/hadoop/hdfs-site.xml 
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
4.3 mapred-site.xml
# vim /opt/hadoop/etc/hadoop/mapred-site.xml
<configuration>
  <property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
  </property>
</configuration>
4.4 yarn-site.xml
# vim /opt/hadoop/etc/hadoop/yarn-site.xml
<configuration>
  <property>
<name>yarn.resourcemanager.hostname</name>
<value>localhost</value>
  </property>
  <property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
  </property>
  <property>
<name>yarn.resourcemanager.webapp.address</name>
<value>0.0.0.0:8088</value>
  </property>
</configuration>
5.初始化
# hdfs namenode -format
# vim start-dfs.sh 
# vim stop-dfs.sh 
HDFS_DATANODE_USER=root  
HDFS_DATANODE_SECURE_USER=hdfs  
HDFS_NAMENODE_USER=root  
HDFS_SECONDARYNAMENODE_USER=root
# vim start-yarn.sh 
# vim stop-yarn.sh 
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
6.启动
6.1启动dfs
# start-dfs.sh
# jps
13840 DataNode
14069 SecondaryNameNode
13711 NameNode
14223 Jps
6.2启动yarn
# start-yarn.sh 
# jps
13840 DataNode
14069 SecondaryNameNode
14361 ResourceManager
14843 Jps
13711 NameNode
14607 NodeManager
# http://localhost:8088
# http://localhost:9870
7.停止
# stop-all.sh

我来说说