栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 前沿技术 > 大数据 > 大数据系统

hive部署高可用集群-远程连接hive

hive部署高可用集群-远程连接hive

主机名

ip地址

子网掩码

网关

DNS1

 lwm101

192.168.21.101

255.255.255.0

192.168.21.2

8.8.8.8

lwm102

        192.168.21.102

255.255.255.0

192.168.21.2

8.8.8.8

lwm103

192.168.21.103

255.255.255.0

192.168.21.2

8.8.8.8

vi /etc/sysconfig/network-scripts/ifcfg-ens33把该改的改,改输入的输入

service network restart

把lwm101连接CRT操作

vi /etc/hosts

192.168.21.101  lwm101

192.168.21.102  lwm102

192.168.21.103  lwm103

tar -zxvf /opt/w/jdk-8u161-linux-x64.tar.gz -C /opt/m/

mv /opt/m/jdk1.8.0_161 /opt/m/jdk

tar -zxvf /opt/w/zookeeper-3.4.10.tar.gz -C /opt/m/

cd /opt/m/zookeeper-3.4.10/conf/

cp zoo_sample.cfg zoo.cfg

dataDir=/opt/data/zookeeper/zkdata

server.1=lwm101:2888:3888

server.2=lwm102:2888:3888

server.3=lwm103:2888:3888

mkdir -p /opt/data/zookeeper/zkdata

cd /opt/data/zookeeper/zkdata

echo 1 > myid

tar -zxvf /opt/w/hadoop-2.7.4.tar.gz -C /opt/m/

cd /opt/m/hadoop-2.7.4/etc/hadoop/   

vi hadoop-env.sh               把JAVA_HOME 目录改为/opt/m/jdk/

vi yarn-env.sh            把JAVA_HOME 目录改为/opt/m/jdk/

vi core-site.xml

    fs.defaultFS

    hdfs://master

    hadoop.tmp.dir

    /opt/m/hadoop-2.7.4/tmp

    ha.zookeeper.quorum

    lwm101:2181,lwm102:2181,lwm103:2181

vi hdfs-site.xml

    dfs.replication

    3

    dfs.namenode.name.dir

    /opt/data/hadoop/namenode

   

    dfs.datanode.data.dir   

    /opt/data/hadoop/datanode   

    dfs.nameservices

    master

    dfs.ha.namenodes.master

    nn1,nn2

    dfs.namenode.rpc-address.master.nn1

    lwm101:9000

    dfs.namenode.rpc-address.master.nn2

    lwm102:9000

    dfs.namenode.http-address.master.nn1

    lwm101:50070

    dfs.namenode.http-address.master.nn2

    lwm102:50070

  dfs.namenode.shared.edits.dir

  qjournal://lwm101:8485;lwm102:8485;lwm103:8485/master

    dfs.journalnode.edits.dir

    /opt/data/hadoop/journaldata

  dfs.client.failover.proxy.provider.master

  org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider

    dfs.ha.fencing.methods

   

        sshfence

        shell(/bin/true)

   

    dfs.ha.fencing.ssh.private-key-files

    /root/.ssh/id_rsa

    dfs.ha.automatic-failover.enabled

    true

       dfs.ha.fencing.ssh.connect-timeout

       30000

       dfs.webhdfs.enabled

       true

cp mapred-site.xml.template mapred-site.xml

vi mapred-site.xml

      mapreduce.framework.name

      yarn

vi yarn-site.xml

    yarn.nodemanager.aux-services

    mapreduce_shuffle

    yarn.resourcemanager.ha.enabled

    true

    yarn.resourcemanager.cluster-id

    yarncluster

    yarn.resourcemanager.ha.rm-ids

    rm1,rm2

    yarn.resourcemanager.hostname.rm1

    lwm101

    yarn.resourcemanager.hostname.rm2

    lwm102

    yarn.resourcemanager.zk-address

    lwm101:2181,lwm102:2181,lwm103:2181

    yarn.resourcemanager.recovery.enabled

    true

      yarn.resourcemanager.ha.automatic-failover.enabled

      true

    yarn.resourcemanager.store.class

    org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore

    yarn.log-aggregation-enable

    true

 

vi slaves

lwm101

lwm102

lwm103

vi /etc/profile

# 配置jdk系统环境变量

export JAVA_HOME=/opt/m/jdk/

export PATH=$PATH:$JAVA_HOME/bin

export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

# 配置zookeeper系统环境变量

export ZK_HOME=/opt/m/zookeeper-3.4.10

export PATH=$PATH:$ZK_HOME/bin

# 配置Hadoop系统环境变量

export HADOOP_HOME=/opt/m/hadoop-2.7.4

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

source /etc/profile

克隆出lwm102、lwm103

以lwm102为例

vi /etc/sysconfig/network-scripts/ifcfg-ens33把IP地址101改为102

service network restart

把lwm102连接CRT操作

sed -i "/UUID=/cUUID=$(uuidgen)" /etc/sysconfig/network-scripts/ifcfg-ens33

hostnamectl set-hostname lwm102

cd /opt/data/zookeeper/zkdata

echo 2 > myid

source /etc/profile

lwm103重复以上克隆步骤(绿色的对应修改)

在lwm101操作

rpm -qa | grep openssh

service sshd status

ssh-keygen -t rsa

ssh-copy-id lwm101

ssh-copy-id lwm102

ssh-copy-id lwm103

分别在lwm101、lwm102、lwm103输入以下命令

systemctl stop firewalld.service 

systemctl disable firewalld.service

zkServer.sh start

hadoop-daemon.sh start journalnode

在lwm101操作

hdfs namenode -format

hdfs zkfc -formatZK

scp -r /opt/data/hadoop/namenode/ root@lwm102:/opt/data/hadoop/

start-dfs.sh

start-yarn.sh

分别在lwm101、lwm102、lwm103执行jps查看

------------------------------------------

------------------------------------------

在lwm101:

# 下载并安装wget工具,wget是Linux中的一个下载文件的工具

 yum install wget -y

下载MySQL 5.7的yum资源库,资源库文件会下载到当前目录下

 wget -c http://dev.mysql.com/get/mysql57-community-release-el7-10.noarch.rpm

# 安装MySQL 5.7的yum资源库

yum -y install mysql57-community-release-el7-10.noarch.rpm

# 安装MySQL 5.7服务

 yum -y install mysql-community-server

rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022

systemctl start mysqld.service

systemctl status mysqld.service

grep "password" /var/log/mysqld.log

# 修改密码为1qaz@WSX密码策略规则要求密码必须包含英文大小写、数字以及特殊符号

 mysql>  ALTER USER 'root'@'localhost' IDENTIFIED BY '1qaz@WSX';     

# 刷新MySQL配置,使得配置生效

 mysql>  FLUSH PRIVILEGES;

tar -zxvf /opt/w/apache-hive-2.3.7-bin.tar.gz -C /opt/m/

cd  /opt/m/apache-hive-2.3.7-bin/conf/

cp hive-env.sh.template hive-env.sh

vi hive-env.sh

     export HADOOP_HOME=/opt/m/hadoop-2.7.4

     export HIVE_CONF_DIR=/opt/m/apache-hive-2.3.7-bin/conf

     export HIVE_AUX_JARS_PATH=/opt/m/apache-hive-2.3.7-bin/lib

     export JAVA_HOME=/opt/m/jdk

vi hive-site.xml

        hive.metastore.warehouse.dir

        /user/hive_local/warehouse   

        hive.exec.scratchdir

        /tmp_local/hive

 

        hive.metastore.local

        true

        javax.jdo.option.ConnectionURL

        jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true&useSSL=false

 

        javax.jdo.option.ConnectionDriverName

        com.mysql.jdbc.Driver

        javax.jdo.option.ConnectionUserName

        root

        javax.jdo.option.ConnectionPassword

        1qaz@WSX

        hive.cli.print.header       

        true

        hive.cli.print.current.db

        true

把mysql-connector-java-5.1.32.jar放到hive/lib

vi /etc/profile

export HIVE_HOME=/opt/m/apache-hive-2.3.7-bin

export PATH=$PATH:$HIVE_HOME/bin

source /etc/profile

scp -r /etc/profile root@lwm102:/etc/profile

schematool -initSchema -dbType mysql

#若初始化完成后出现“schemaTool completed”信息,则说明成功初始化MySQL。

hive --service hiveserver2 &

新建一个lwm101窗口:

cd /opt/m/apache-hive-2.3.7-bin

bin/beeline

在lwm102:

tar -zxvf /opt/w/apache-hive-2.3.7-bin.tar.gz -C /opt/m/

cd /opt/m/apache-hive-2.3.7-bin/conf/

touch hive-site.xml

        hive.metastore.warehouse.dir            

        /user/hive_local/warehouse   

        hive.exec.scratchdir

        /tmp_local/hive

 

        hive.metastore.local

        false 

 

 

        hive.metastore.uris

        thrift://lwm101:9083

        hive.server2.thrift.port

        10000

   

   

        hive.server2.thrift.bind.host

            localhost

   

scp -r hive-site.xml root@lwm101:/opt/m/apache-hive-2.3.7-bin/conf

source /etc/profile

beeline -u jdbc:hive2://lwm101:10000 -n root -p

输入1qaz@WSX显示下面则成功

转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/780575.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号