访问官网下载apache-dolphinscheduler-1.3.6-src.tar.gz源码包并进行解压
下载mysql的驱动包(版本: mysql-connector-java-5.1.49.jar)
第一步、解压源码包
$ tar -zxvf apache-dolphinscheduler-1.3.6-src.tar.gz
$ docker-compose up -d
第二步、进入docker-compose所在文件夹
$ cd apache-dolphinscheduler-1.3.6-src/docker/docker-swarm
第三步、拉取docker镜像
$ docker pull apache/dolphinscheduler:1.3.6
第四步、修改镜像的tag为latest
$ docker tag apache/dolphinscheduler:1.3.6 apache/dolphinscheduler:latest
第五步、修改config.env.sh文件里面的数据库配置,将其修改为mysql,注释掉PostgreSQL配置选项,启用mysql配置选项
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#============================================================================
# Database
#============================================================================
# postgresql
# 注释掉PostgreSQL配置选项,启用mysql配置选项
#DATABASE_TYPE=postgresql
#DATABASE_DRIVER=org.postgresql.Driver
#DATABASE_HOST=dolphinscheduler-postgresql
#DATABASE_PORT=5432
#DATABASE_USERNAME=root
#DATABASE_PASSWORD=root
#DATABASE_DATABASE=dolphinscheduler
#DATABASE_PARAMS=characterEncoding=utf8
# mysql
DATABASE_TYPE=mysql
DATABASE_DRIVER=com.mysql.jdbc.Driver
DATABASE_HOST=dolphinscheduler-mysql
DATABASE_PORT=3306
DATABASE_USERNAME=root
DATABASE_PASSWORD=root
DATABASE_DATABASE=dolphinscheduler
DATABASE_PARAMS=useUnicode=true&characterEncoding=UTF-8
#============================================================================
# ZooKeeper
#============================================================================
ZOOKEEPER_QUORUM=dolphinscheduler-zookeeper:2181
ZOOKEEPER_ROOT=/dolphinscheduler
#============================================================================
# Common
#============================================================================
# common opts
DOLPHINSCHEDULER_OPTS=
# common env
DATA_BASEDIR_PATH=/tmp/dolphinscheduler
RESOURCE_STORAGE_TYPE=HDFS
RESOURCE_UPLOAD_PATH=/dolphinscheduler
FS_DEFAULT_FS=file:///
FS_S3A_ENDPOINT=s3.xxx.amazonaws.com
FS_S3A_ACCESS_KEY=xxxxxxx
FS_S3A_SECRET_KEY=xxxxxxx
HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE=false
JAVA_SECURITY_KRB5_CONF_PATH=/opt/krb5.conf
LOGIN_USER_KEYTAB_USERNAME=hdfs@HADOOP.COM
LOGIN_USER_KEYTAB_PATH=/opt/hdfs.keytab
KERBEROS_EXPIRE_TIME=2
HDFS_ROOT_USER=hdfs
YARN_RESOURCEMANAGER_HA_RM_IDS=
YARN_APPLICATION_STATUS_ADDRESS=http://ds1:8088/ws/v1/cluster/apps/%s
# skywalking
SKYWALKING_ENABLE=false
SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800
SW_GRPC_LOG_SERVER_HOST=127.0.0.1
SW_GRPC_LOG_SERVER_PORT=11800
# dolphinscheduler env
HADOOP_HOME=/opt/soft/hadoop
HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
SPARK_HOME1=/opt/soft/spark1
SPARK_HOME2=/opt/soft/spark2
PYTHON_HOME=/usr/bin/python
JAVA_HOME=/usr/local/openjdk-8
HIVE_HOME=/opt/soft/hive
FLINK_HOME=/opt/soft/flink
DATAX_HOME=/opt/soft/datax
#============================================================================
# Master Server
#============================================================================
MASTER_SERVER_OPTS=-Xms1g -Xmx1g -Xmn512m
MASTER_EXEC_THREADS=100
MASTER_EXEC_TASK_NUM=20
MASTER_DISPATCH_TASK_NUM=3
MASTER_HOST_SELECTOR=LowerWeight
MASTER_HEARTBEAT_INTERVAL=10
MASTER_TASK_COMMIT_RETRYTIMES=5
MASTER_TASK_COMMIT_INTERVAL=1000
MASTER_MAX_CPULOAD_AVG=-1
MASTER_RESERVED_MEMORY=0.3
#============================================================================
# Worker Server
#============================================================================
WORKER_SERVER_OPTS=-Xms1g -Xmx1g -Xmn512m
WORKER_EXEC_THREADS=100
WORKER_HEARTBEAT_INTERVAL=10
WORKER_MAX_CPULOAD_AVG=-1
WORKER_RESERVED_MEMORY=0.3
WORKER_GROUPS=default
#============================================================================
# Alert Server
#============================================================================
ALERT_SERVER_OPTS=-Xms512m -Xmx512m -Xmn256m
# xls file
XLS_FILE_PATH=/tmp/xls
# mail
MAIL_SERVER_HOST=
MAIL_SERVER_PORT=
MAIL_SENDER=
MAIL_USER=
MAIL_PASSWD=
MAIL_SMTP_STARTTLS_ENABLE=true
MAIL_SMTP_SSL_ENABLE=false
MAIL_SMTP_SSL_TRUST=
# wechat
ENTERPRISE_WECHAT_ENABLE=false
ENTERPRISE_WECHAT_CORP_ID=
ENTERPRISE_WECHAT_SECRET=
ENTERPRISE_WECHAT_AGENT_ID=
ENTERPRISE_WECHAT_USERS=
#============================================================================
# Api Server
#============================================================================
API_SERVER_OPTS=-Xms512m -Xmx512m -Xmn256m
#============================================================================
# Logger Server
#============================================================================
LOGGER_SERVER_OPTS=-Xms512m -Xmx512m -Xmn256m

第六步、在dolphinscheduler文件夹下创建mysql文件夹并进入
mkdir mysql
cd mysql
第七步、创建一个mysql的docker-compose.yml文件,mysql版本5.7,指定root密码
# Standalone MySQL 5.7 for DolphinScheduler metadata.
# Data lives in the pre-created external volume "dolphinscheduler-mysql"
# so it survives `docker-compose down`.
version: "3.1"

services:
  dolphinscheduler-mysql:
    image: mysql:5.7
    container_name: dolphinscheduler-mysql
    ports:
      # quoted to avoid YAML sexagesimal parsing of host:container pairs
      - "13306:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=root
    volumes:
      - dolphinscheduler-mysql:/var/lib/mysql
    restart: unless-stopped

volumes:
  dolphinscheduler-mysql:
    # created out-of-band via `docker volume create dolphinscheduler-mysql`
    external: true
第八步、创建MySQL数据卷并在docker-compose里面进行挂载
docker volume create dolphinscheduler-mysql
第九步、启动mysql容器并跟踪日志查看mysql是否启动正常
docker-compose up -d && docker-compose logs -f
第十步、进入mysql容器中,登录mysql并创建dolphinscheduler数据库
$ docker exec -it dolphinscheduler-mysql /bin/bash
$ mysql -uroot -p
$ CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
第十一步、在dolphinscheduler创建dockerimage文件夹并进入,创建Dockerfile,requirements.txt sources.list文件并将mysql的驱动文件放进去,将Python所需要的依赖与mysql的驱动包打成镜像
mkdir dockerimage
cd dockerimage
touch Dockerfile         # docker打镜像文件
touch requirements.txt   # 需要安装的Python依赖文件
touch sources.list       # apt换源文件
# Extend the stock DolphinScheduler image with the MySQL JDBC driver
# and a Python 3 toolchain (pandas/pymysql/sqlalchemy via requirements.txt).
FROM apache/dolphinscheduler:latest

# Python dependency list, mirrored apt sources, and the MySQL driver jar
COPY requirements.txt /tmp
COPY sources.list /etc/apt/
COPY mysql-connector-java-5.1.49.jar /opt/dolphinscheduler/lib

# Single RUN layer; apt cache removed afterwards to keep the image small
RUN apt-get update \
 && apt-get upgrade -y \
 && apt-get install -y python3-pip \
 && pip3 install -r /tmp/requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple \
 && rm -rf /var/lib/apt/lists/*
pandas pymysql sqlalchemy
deb http://mirrors.aliyun.com/debian/ buster main non-free contrib
deb-src http://mirrors.aliyun.com/debian/ buster main non-free contrib
deb http://mirrors.aliyun.com/debian-security buster/updates main
deb-src http://mirrors.aliyun.com/debian-security buster/updates main
deb http://mirrors.aliyun.com/debian/ buster-updates main non-free contrib
deb-src http://mirrors.aliyun.com/debian/ buster-updates main non-free contrib
deb http://mirrors.aliyun.com/debian/ buster-backports main non-free contrib
deb-src http://mirrors.aliyun.com/debian/ buster-backports main non-free contrib
第十二步、打镜像
docker build -t apache/dolphinscheduler:mysql-driver-python3-pymysql .第十三步、进入dolphinscheduler的docker-compose所在的文件夹修改docker-compose.yml文件去掉PostgreSQL部分,并修改master等image为apache/dolphinscheduler:mysql-driver-python3-pymysql镜像文件
cd apache-dolphinscheduler-1.3.6-src/docker/docker-swarm
# DolphinScheduler cluster (PostgreSQL service removed; all DS services
# use the locally-built image with the MySQL driver + Python 3 baked in).
version: "3.1"

services:
  dolphinscheduler-zookeeper:
    image: bitnami/zookeeper:latest
    container_name: dolphinscheduler-zookeeper
    environment:
      TZ: Asia/Shanghai
      ALLOW_ANONYMOUS_LOGIN: "yes"
      ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
    volumes:
      - dolphinscheduler-zookeeper:/bitnami/zookeeper
    restart: unless-stopped
    networks:
      - dolphinscheduler

  dolphinscheduler-api:
    image: apache/dolphinscheduler:mysql-driver-python3-pymysql
    container_name: dolphinscheduler-api
    command: api-server
    ports:
      # quoted to avoid YAML sexagesimal parsing of host:container pairs
      - "12345:12345"
    environment:
      TZ: Asia/Shanghai
    env_file: .env
    healthcheck:
      test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      - dolphinscheduler-zookeeper
    volumes:
      - dolphinscheduler-logs:/opt/dolphinscheduler/logs
      - dolphinscheduler-shared-local:/opt/soft
      - dolphinscheduler-resource-local:/dolphinscheduler
    restart: unless-stopped
    networks:
      - dolphinscheduler

  dolphinscheduler-alert:
    image: apache/dolphinscheduler:mysql-driver-python3-pymysql
    container_name: dolphinscheduler-alert
    command: alert-server
    environment:
      TZ: Asia/Shanghai
    env_file: .env
    healthcheck:
      # checkpoint.sh greps the exact Java class name, so the case matters
      test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
      interval: 30s
      timeout: 5s
      retries: 3
    volumes:
      - dolphinscheduler-logs:/opt/dolphinscheduler/logs
    restart: unless-stopped
    networks:
      - dolphinscheduler

  dolphinscheduler-master:
    image: apache/dolphinscheduler:mysql-driver-python3-pymysql
    container_name: dolphinscheduler-master
    command: master-server
    environment:
      TZ: Asia/Shanghai
    env_file: .env
    healthcheck:
      test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      - dolphinscheduler-zookeeper
    volumes:
      - dolphinscheduler-logs:/opt/dolphinscheduler/logs
      - dolphinscheduler-shared-local:/opt/soft
    restart: unless-stopped
    networks:
      - dolphinscheduler

  dolphinscheduler-worker:
    image: apache/dolphinscheduler:mysql-driver-python3-pymysql
    container_name: dolphinscheduler-worker
    command: worker-server
    environment:
      TZ: Asia/Shanghai
    env_file: .env
    healthcheck:
      test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      - dolphinscheduler-zookeeper
    volumes:
      - dolphinscheduler-worker-data:/tmp/dolphinscheduler
      - dolphinscheduler-logs:/opt/dolphinscheduler/logs
      - dolphinscheduler-shared-local:/opt/soft
      - dolphinscheduler-resource-local:/dolphinscheduler
    restart: unless-stopped
    networks:
      - dolphinscheduler

networks:
  dolphinscheduler:
    driver: bridge

volumes:
  dolphinscheduler-zookeeper:
  dolphinscheduler-worker-data:
  dolphinscheduler-logs:
  dolphinscheduler-shared-local:
  dolphinscheduler-resource-local:
第十四步、启动
docker-compose up -d && docker-compose logs -f第十五步、访问
http://ip:12345/dolphinscheduler/



