I. Basic Environment Preparation
1. Download the installation packages (all are the latest stable versions as of May 24, 2017)
1) jdk-8u131
2) hadoop-2.7.3
3) hbase-1.2.5
4) zookeeper-3.4.10
2. Edit the hosts file (the three cluster hosts use the default IPs 192.168.0.100, 192.168.0.101, and 192.168.0.102)
# vim /etc/hosts

Add the following entries:

192.168.0.100 master
192.168.0.101 slave1
192.168.0.102 slave2
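The same three entries need to exist on every host in the cluster. A quick ping from each machine confirms the names resolve:

# ping -c 1 slave1
# ping -c 1 slave2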
3. Install the JDK

# mkdir /usr/java
# tar -zxvf jdk-8u131-linux-x64.tar.gz -C /usr/java

# scp -r /usr/java slave1:/usr
# scp -r /usr/java slave2:/usr

# vim /etc/environment

JAVA_HOME=/usr/java/jdk1.8.0_131
JRE_HOME=/usr/java/jdk1.8.0_131/jre

# vim /etc/profile

export JAVA_HOME=/usr/java/jdk1.8.0_131
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
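To confirm the JDK is picked up, reload the profile and check the version (repeat on both slaves once their profiles carry the same exports):

# source /etc/profile
# java -version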
4. Set up passwordless SSH login
On slave1:

# ssh-keygen -t rsa
# cp ~/.ssh/id_rsa.pub ~/.ssh/slave1_id_rsa.pub
# scp ~/.ssh/slave1_id_rsa.pub master:~/.ssh/
On slave2:

# ssh-keygen -t rsa
# cp ~/.ssh/id_rsa.pub ~/.ssh/slave2_id_rsa.pub
# scp ~/.ssh/slave2_id_rsa.pub master:~/.ssh/
On master:

# ssh-keygen -t rsa
# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# cat ~/.ssh/slave1_id_rsa.pub >> ~/.ssh/authorized_keys
# cat ~/.ssh/slave2_id_rsa.pub >> ~/.ssh/authorized_keys

# scp ~/.ssh/authorized_keys slave1:~/.ssh
# scp ~/.ssh/authorized_keys slave2:~/.ssh
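Each of the following should now log in without prompting for a password; if a prompt still appears, check that ~/.ssh is mode 700 and authorized_keys is mode 600:

# ssh slave1 hostname
# ssh slave2 hostname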
5. Disable the firewall and SELinux (on all three nodes)

# systemctl stop firewalld.service
# systemctl disable firewalld.service

# vim /etc/selinux/config

Comment out the existing lines:

#SELINUX=enforcing
#SELINUXTYPE=targeted

Then add:

SELINUX=disabled
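The config-file change only takes effect after a reboot; to turn SELinux off for the current session and verify, use:

# setenforce 0
# getenforce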
II. Hadoop Environment Setup
1. Extract the installation package and create the basic directories

# tar -zxvf hadoop-2.7.3-x64.tar.gz -C /usr
# cd /usr/hadoop-2.7.3
# mkdir tmp logs hdf hdf/data hdf/name
2. Edit the Hadoop configuration files

# vim /usr/hadoop-2.7.3/etc/hadoop/slaves

slave1
slave2
# vim /usr/hadoop-2.7.3/etc/hadoop/core-site.xml

<property>
  <name>fs.defaultFS</name>
  <value>hdfs://master:9000</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>file:/usr/hadoop-2.7.3/tmp</value>
</property>
# vim /usr/hadoop-2.7.3/etc/hadoop/hdfs-site.xml

<property>
  <name>dfs.datanode.data.dir</name>
  <value>/usr/hadoop-2.7.3/hdf/data</value>
  <final>true</final>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>/usr/hadoop-2.7.3/hdf/name</value>
  <final>true</final>
</property>
# cp /usr/hadoop-2.7.3/etc/hadoop/mapred-site.xml.template /usr/hadoop-2.7.3/etc/hadoop/mapred-site.xml
# vim /usr/hadoop-2.7.3/etc/hadoop/mapred-site.xml

<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>master:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>master:19888</value>
</property>
# vim /usr/hadoop-2.7.3/etc/hadoop/yarn-site.xml

<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
  <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
  <name>yarn.resourcemanager.address</name>
  <value>master:8032</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.address</name>
  <value>master:8030</value>
</property>
<property>
  <name>yarn.resourcemanager.resource-tracker.address</name>
  <value>master:8031</value>
</property>
<property>
  <name>yarn.resourcemanager.admin.address</name>
  <value>master:8033</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address</name>
  <value>master:8088</value>
</property>
3. Copy Hadoop to the slave nodes

# scp -r /usr/hadoop-2.7.3 slave1:/usr
# scp -r /usr/hadoop-2.7.3 slave2:/usr
4. Configure the Hadoop environment variables on master and the slaves

# vim /etc/profile

export HADOOP_HOME=/usr/hadoop-2.7.3
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export HADOOP_LOG_DIR=/usr/hadoop-2.7.3/logs
export YARN_LOG_DIR=$HADOOP_LOG_DIR

# source /etc/profile

# vim ~/.bashrc

export HADOOP_PREFIX=/usr/hadoop-2.7.3/
5. Format the NameNode

# /usr/hadoop-2.7.3/bin/hdfs namenode -format
6. Start Hadoop

# /usr/hadoop-2.7.3/sbin/start-all.sh
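If startup succeeded, jps should list the expected daemons on every node (this assumes jps is on the PATH of non-interactive shells; otherwise log in to each slave and run it directly):

# jps               # on master: NameNode, SecondaryNameNode, ResourceManager
# ssh slave1 jps    # on each slave: DataNode, NodeManager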
At this point the Hadoop environment has been set up successfully.
III. ZooKeeper Environment Setup
1. Extract the ZooKeeper installation package and create the basic directory

# tar -zxvf zookeeper-3.4.10.tar.gz -C /usr
# mkdir /usr/zookeeper-3.4.10/data
2. Edit the configuration file

# cp /usr/zookeeper-3.4.10/conf/zoo_sample.cfg /usr/zookeeper-3.4.10/conf/zoo.cfg
# vim /usr/zookeeper-3.4.10/conf/zoo.cfg

dataDir=/usr/zookeeper-3.4.10/data
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888

# touch /usr/zookeeper-3.4.10/data/myid
# vim /usr/zookeeper-3.4.10/data/myid

1 (on the master node)
2 (on the slave1 node)
3 (on the slave2 node)
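The steps above only prepare master. Following the same scp pattern used for the JDK and Hadoop, the ZooKeeper tree presumably needs to be copied to both slaves, with each node's myid then overwritten with its own id (a sketch):

# scp -r /usr/zookeeper-3.4.10 slave1:/usr
# scp -r /usr/zookeeper-3.4.10 slave2:/usr
# ssh slave1 "echo 2 > /usr/zookeeper-3.4.10/data/myid"
# ssh slave2 "echo 3 > /usr/zookeeper-3.4.10/data/myid"

The quotes make the redirection run on the remote host, so each slave's myid file gets its own id.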
3. Start ZooKeeper (on all three nodes)

# cd /usr/zookeeper-3.4.10/bin
# ./zkServer.sh start
# ./zkServer.sh status
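With all three servers up, zkServer.sh status should report Mode: leader on exactly one node and Mode: follower on the other two. The four-letter-word commands over the client port (enabled by default in 3.4.10) offer another quick check, assuming nc is installed:

# echo ruok | nc master 2181    # a healthy server answers "imok"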
At this point the ZooKeeper environment has been set up.
IV. HBase Environment Setup
1. Extract the HBase installation package

# tar -zxvf hbase-1.2.5-bin.tar.gz -C /usr
# mkdir /usr/hbase-1.2.5/logs
2. Edit hbase-env.sh

# vim /usr/hbase-1.2.5/conf/hbase-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_131
export HBASE_LOG_DIR=${HBASE_HOME}/logs
export HBASE_MANAGES_ZK=false
3. Edit regionservers

# vim /usr/hbase-1.2.5/conf/regionservers

master
slave1
slave2
4. Edit hbase-site.xml

# vim /usr/hbase-1.2.5/conf/hbase-site.xml

<property>
  <name>hbase.rootdir</name>
  <value>hdfs://master:9000/hbase</value>
</property>
<property>
  <name>hbase.cluster.distributed</name>
  <value>true</value>
</property>
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>master,slave1,slave2</value>
</property>
<property>
  <name>hbase.zookeeper.property.dataDir</name>
  <value>/usr/zookeeper-3.4.10/data</value>
</property>
<property>
  <name>hbase.master</name>
  <value>master:60000</value>
</property>
5. Copy HBase to the slaves

# scp -r /usr/hbase-1.2.5 slave1:/usr
# scp -r /usr/hbase-1.2.5 slave2:/usr
6. Start HBase

# /usr/hbase-1.2.5/bin/start-hbase.sh
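If everything came up, jps should now additionally show HMaster on master and an HRegionServer on every host listed in regionservers, and the HBase shell can report the cluster status:

# jps    # on master: HMaster plus HRegionServer
# /usr/hbase-1.2.5/bin/hbase shell
hbase(main):001:0> status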
At this point the HBase environment setup is complete.