1.安装ntp
rpm -qa|grep ntp
yum install ntp -y
2.删除mariadb
rpm -aq | grep mariadb
rpm -e --nodeps mariadb-libs-<完整版本号>   # 用上一条 rpm -aq | grep mariadb 查到的完整包名替换
3.安装mysql
rpm -ivh mysql-community-common
... libs
... libs-compat
... client
... server
4.修改主机名
hostnamectl set-hostname xxx
bash
5.关闭防火墙
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld
6.hosts映射
vi /etc/hosts
7.修改时区
tzselect
TZ="Asia/Shanghai";export TZ
8.屏蔽默认server,设master为本地时钟源,服务器层级设为10
vi /etc/ntp.conf
server 127.127.1.0
fudge 127.127.1.0 stratum 10
让硬件时间与系统时间同步
vi /etc/sysconfig/ntpd
OPTIONS="-g"
SYNC_HWCLOCK=yes
9.主节点开启ntp,从节点同步
service ntpd start
crontab -e
*/30 10-17 * * * /usr/sbin/ntpdate master
10.免密
ssh-keygen -t rsa 回车三次
ssh-copy-id master
11.环境变量
vi /etc/profile
export JAVA_HOME=/usr/software/java/jdk1.8.0_91
export PATH=$PATH:$JAVA_HOME/bin
export ZOOKEEPER_HOME=/usr/software/zookeeper/zookeeper-3.4.14
export PATH=$PATH:$ZOOKEEPER_HOME/bin
export HADOOP_HOME=/usr/software/hadoop/hadoop-2.7.7
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HIVE_HOME=/usr/software/hive/apache-hive-2.3.4-bin
export PATH=$PATH:$HIVE_HOME/bin
source /etc/profile
12.zookeeper
mv zoo_sample.cfg zoo.cfg
dataDir=/usr/software/zookeeper/zookeeper-3.4.14/zkdata/
dataLogDir=/usr/software/zookeeper/zookeeper-3.4.14/zkdatalog/
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
mkdir zkdata
mkdir zkdatalog
cd zkdata
echo 1 > myid   # myid内容须与zoo.cfg中server.N对应: master写1, slave1写2, slave2写3
bin/zkServer.sh start
13.hadoop
cd $HADOOP_HOME/etc/hadoop
hadoop-env.sh
export HADOOP_HOME=/usr/software/hadoop/hadoop-2.7.7
core-site.xml
fs.defaultFS   (旧名 fs.default.name 已废弃)
hdfs://master:9000
hadoop.tmp.dir
/usr/software/hadoop/hadoop-2.7.7/hdfs/tmp
hdfs-site.xml
dfs.namenode.name.dir
/usr/software/hadoop/hadoop-2.7.7/hdfs/name
dfs.datanode.data.dir
/usr/software/hadoop/hadoop-2.7.7/hdfs/data
dfs.replication
2
yarn-env.sh
export HADOOP_HOME=/usr/software/hadoop/hadoop-2.7.7
yarn-site.xml
yarn.resourcemanager.admin.address
master:18141
yarn.nodemanager.aux-services
mapreduce_shuffle
cp mapred-site.xml.template mapred-site.xml
mapreduce.framework.name
yarn
在 $HADOOP_HOME/etc/hadoop 下新建 master 和 slaves 两个文件
touch master
master
touch slaves
slave1
slave2
格式化:在master执行
hadoop namenode -format
14.数据仓库
是否安装mysql并开启服务
rpm -aq | grep mysql-community-server
systemctl status mysqld
生成临时密码
grep "temporary password" /var/log/mysqld.log
登录
mysql -uroot -p
set global validate_password_policy=0;
set global validate_password_length=6;
alter user 'root'@'localhost' identified by '123456';
grant all privileges on *.* to 'root'@'%' identified by '123456' with grant option;
flush privileges;
hive-env.sh
mv hive-env.sh.template hive-env.sh
export HADOOP_HOME=/usr/software/hadoop/hadoop-2.7.7
hive-site.xml
--------------------------------master-------------------------------------
hive.metastore.warehouse.dir
/user/hive_remote/warehouse
hive.metastore.local
false
hive.metastore.uris
thrift://slave1:9083
--------------------------------slave1----------------------------------
hive.metastore.warehouse.dir
/user/hive_remote/warehouse
javax.jdo.option.ConnectionURL
jdbc:mysql://slave2:3306/hive?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8&amp;useSSL=false
(注意: 在hive-site.xml中 & 必须转义写成 &amp;)
JDBC connect string for a JDBC metastore
javax.jdo.option.ConnectionDriverName
com.mysql.jdbc.Driver
javax.jdo.option.ConnectionUserName
root
javax.jdo.option.ConnectionPassword
123456
hive.metastore.schema.verification
false
datanucleus.schema.autoCreateAll
true
mysql lib包
cp mysql-connector-java-5.1.47-bin.jar /usr/software/hive/apache-hive-2.3.4-bin/lib
jline冲突
cp jline-2.12.jar /usr/software/hadoop/hadoop-2.7.7/share/hadoop/yarn/lib
slave1上执行,初始化数据库
schematool -dbType mysql -initSchema
slave1上执行
cd /usr/software/hive/apache-hive-2.3.4-bin
bin/hive --service metastore
master上执行
cd /usr/software/hive/apache-hive-2.3.4-bin
bin/hive