解决方法: 将hadoop中share/hadoop/yarn/lib路径下的jline包换成hive中匹配的jar包。 将hadoop-2.6.5/share/hadoop/yarn/lib下的jline-0.9 换成hive下的jline-2.12 用hive jar包中的jline替换掉Hadoop jar包中的jline 在Hadoop根目录下运行命令: find ../ -name "jline*" 搜索结果: ../hadoop-2.6.5/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jline-0.9.94.jar ../hadoop-2.6.5/share/hadoop/yarn/lib/jline-0.9.94.jar ../hadoop-2.6.5/share/hadoop/kms/tomcat/webapps/kms/WEB-INF/lib/jline-0.9.94.jar ../scala-2.11.4/lib/jline-2.12.jar ../hive1.2.2/lib/jline-2.12.jar ../zookeeper-3.4.11/lib/jline-0.9.94.jar ../zookeeper-3.4.11/lib/jline-0.9.94.LICENSE.txt ../zookeeper-3.4.11/src/java/lib/jline-0.9.94.LICENSE.txt 将Hive根目录下jline-2.12.jar替换到hadoop中 可参考:https://blog.csdn.net/wanghaiyuan1993/article/details/46272553 Zookeeper3.4.11 # 解压Zookeeper包 cd /usr/local/src tar zxvf zookeeper-3.4.11.tar.gz # 修改配置文件 注释第12行,在最后加上配置 cd /usr/local/src/zookeeper-3.4.11/conf cp zoo_sample.cfg zoo.cfg vim zoo.cfg dataDir=/usr/local/src/zookeeper-3.4.11/data dataLogDir=/usr/local/src/zookeeper-3.4.11/log server.1=master:2888:3888 server.2=slave1:2888:3888 server.3=slave2:2888:3888 # 创建日志文件夹及数据文件夹 mkdir /usr/local/src/zookeeper-3.4.11/data mkdir /usr/local/src/zookeeper-3.4.11/log # 配置环境变量 vim ~/.bashrc export ZOOKEEPER_HOME=/usr/local/src/zookeeper-3.4.11 export PATH=$PATH:$ZOOKEEPER_HOME/bin # 复制环境变量到其他节点 scp ~/.bashrc root@slave1:~/ scp ~/.bashrc root@slave2:~/ # 重新加载环境变量 source ~/.bashrc # 复制Zookeeper包到从节点 scp -r /usr/local/src/zookeeper-3.4.11 root@slave1:/usr/local/src/ scp -r /usr/local/src/zookeeper-3.4.11 root@slave2:/usr/local/src/ #分别添加ID,每一个节点的id都不能重复且需与配置文件一致 #Master echo "1" > /usr/local/src/zookeeper-3.4.11/data/myid #Slave1 echo "2" > /usr/local/src/zookeeper-3.4.11/data/myid #Slave2 echo "3" > /usr/local/src/zookeeper-3.4.11/data/myid #启动Zookeeper服务(每个节点都要启动) zkServer.sh start zkServer.sh stop zkServer.sh status All: QuorumPeerMain HBase1.3.1 # 解压HBase包 cd /usr/local/src tar zxvf hbase-1.3.1-bin.tar.gz # 配置regionservers主机名 cd hbase-1.3.1/conf vim 
regionservers master slave1 slave2 # 配置环境变量及不启用HBase自带的Zookeeper服务(注意变量名必须全大写,HBase只识别HBASE_MANAGES_ZK) vim hbase-env.sh export JAVA_HOME=/usr/local/src/jdk1.8.0_172 export HBASE_MANAGES_ZK=false 第29、124行 # 配置HBase核心参数 vim hbase-site.xml
Flume 1.6 # 解压Flume包 cd /usr/local/src tar zxvf apache-flume-1.6.0-bin.tar.gz # 修改配置文件,配置JVM参数 cd apache-flume-1.6.0-bin/conf cp flume-env.sh.template flume-env.sh vi flume-env.sh export JAVA_HOME=/usr/local/src/jdk1.8.0_172 export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote" # 编写NetCat示例配置 cd apache-flume-1.6.0-bin/conf #NetCat vim flume-netcat.conf # Name the components on this agent agent.sources = r1 agent.sinks = k1 agent.channels = c1 # Describe/configuration the source agent.sources.r1.type = netcat agent.sources.r1.bind = 0.0.0.0 agent.sources.r1.port = 44444 # Describe the sink agent.sinks.k1.type = logger # Use a channel which buffers events in memory agent.channels.c1.type = memory agent.channels.c1.capacity = 1000 agent.channels.c1.transactionCapacity = 100 # Bind the source and sink to the channel agent.sources.r1.channels = c1 agent.sinks.k1.channel = c1 #验证(注意source需绑定0.0.0.0而非127.0.0.1,否则远程客户端无法连接) #Server 在 flume1.6.0目录下敲 flume-ng agent --conf conf --conf-file conf/flume-netcat.conf --name agent -Dflume.root.logger=INFO,console #Client windows上 telnet master 44444
kafka_2.11-0.10.2.1 可参考以下安装: 1.https://blog.csdn.net/qq_43605654/article/details/90786063?depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromBaidu-1&utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromBaidu-1 2.https://blog.csdn.net/CarolRain/article/details/78376642?depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromBaidu-6&utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromBaidu-6 # 解压Kafka包 cd /usr/local/src tar zxvf kafka_2.11-0.10.2.1.tgz # 配置Zookeeper为集群中所有部署zookeeper的主机 cd kafka_2.11-0.10.2.1/config vim server.properties 119行:zookeeper.connect=master:2181,slave1:2181,slave2:2181 # 增加环境变量 vim ~/.bashrc export KAFKA_HOME=/usr/local/src/kafka_2.11-0.10.2.1 export PATH=$KAFKA_HOME/bin:$PATH # 复制Kafka包到从节点 scp -r /usr/local/src/kafka_2.11-0.10.2.1 root@slave1:/usr/local/src/ scp -r /usr/local/src/kafka_2.11-0.10.2.1 root@slave2:/usr/local/src/ #复制环境变量到其他节点 scp ~/.bashrc root@slave1:~/ scp ~/.bashrc root@slave2:~/ # 重新加载环境变量 source ~/.bashrc #修改Kafka broker id,集群内每个broker id要求唯一 #Master vim /usr/local/src/kafka_2.11-0.10.2.1/config/server.properties broker.id=0 #Slave1 broker.id=1 #Slave2 broker.id=2 # 创建Zookeeper的启动脚本(如果启动了Zookeeper集群则跳过此步骤) vim /usr/local/src/kafka_2.11-0.10.2.1/bin/start-kafka-zookeeper.sh /usr/local/src/kafka_2.11-0.10.2.1/bin/zookeeper-server-start.sh /usr/local/src/kafka_2.11-0.10.2.1/config/zookeeper.properties # 授予执行权限 chmod +x /usr/local/src/kafka_2.11-0.10.2.1/bin/start-kafka-zookeeper.sh # 启动Kafka自带的Zookeeper集群(全部节点都需要单独启动) start-kafka-zookeeper.sh # 创建Kafka的启动脚本 vim /usr/local/src/kafka_2.11-0.10.2.1/bin/start-kafka.sh /usr/local/src/kafka_2.11-0.10.2.1/bin/kafka-server-start.sh -daemon /usr/local/src/kafka_2.11-0.10.2.1/config/server.properties # 授予执行权限 chmod +x /usr/local/src/kafka_2.11-0.10.2.1/bin/start-kafka.sh # 复制Kafka启动脚本到从节点 scp -r /usr/local/src/kafka_2.11-0.10.2.1/bin/start-kafka.sh root@slave1:/usr/local/src/kafka_2.11-0.10.2.1/bin/ scp -r 
/usr/local/src/kafka_2.11-0.10.2.1/bin/start-kafka.sh root@slave2:/usr/local/src/kafka_2.11-0.10.2.1/bin/ # 启动Kafka集群 (全部节点都需要单独启动) start-kafka.sh bin/kafka-server-start.sh config/server.properties
Spark安装2.0.2(spark-2.0.2-bin-hadoop2.6 + scala-2.11.8) # 解压Spark和Scala包 cd /usr/local/src tar zxvf spark-2.0.2-bin-hadoop2.6.tgz tar zxvf scala-2.11.8.tgz # 修改配置文件 配置环境变量 cd spark-2.0.2-bin-hadoop2.6/conf cp spark-env.sh.template spark-env.sh vim spark-env.sh export SCALA_HOME=/usr/local/src/scala-2.11.8 export JAVA_HOME=/usr/local/src/jdk1.8.0_172 export HADOOP_HOME=/usr/local/src/hadoop-2.6.5 export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop SPARK_MASTER_IP=master SPARK_LOCAL_DIRS=/usr/local/src/spark-2.0.2-bin-hadoop2.6 SPARK_DRIVER_MEMORY=1G # 修改配置文件 添加从节点主机名 cp slaves.template slaves vim slaves slave1 slave2 # 配置环境变量,在配置最后加入 vim ~/.bashrc export SCALA_HOME=/usr/local/src/scala-2.11.8 export PATH=$PATH:$SCALA_HOME/bin export SPARK_HOME=/usr/local/src/spark-2.0.2-bin-hadoop2.6 export PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin # 复制环境变量到其他节点 scp -r ~/.bashrc root@slave1:~/ scp -r ~/.bashrc root@slave2:~/ # 复制Scala包到从节点 scp -r /usr/local/src/scala-2.11.8 root@slave1:/usr/local/src/ scp -r /usr/local/src/scala-2.11.8 root@slave2:/usr/local/src/ # 复制Spark包到从节点 scp -r /usr/local/src/spark-2.0.2-bin-hadoop2.6 root@slave1:/usr/local/src/ scp -r /usr/local/src/spark-2.0.2-bin-hadoop2.6 root@slave2:/usr/local/src/ # 重新加载环境变量 source ~/.bashrc # 启动集群 start-all.sh start-master.sh start-slaves.sh 关闭: stop-all.sh stop-master.sh stop-slaves.sh WEBUI查看: http://master:8080/



