所有 compose 文件统一命名为 docker-compose.yaml
启动：docker-compose up -d
停止：docker-compose down

1: mongo
# docker-compose.yaml — standalone MongoDB 4.4
version: '2'
services:
  mongo:
    image: mongo:4.4.0
    restart: always
    environment:
      - TZ=Asia/Shanghai
    ports:
      # quoted to avoid YAML scalar-typing surprises in port mappings
      - "27017:27017"
    volumes:
      - ./data/db:/data/db            # data directory
      - ./data/log:/var/log/mongodb   # log directory
      - ./data/config:/etc/mongo      # config directory
2: redis
将 redis.conf 放到 conf 目录下
# docker-compose.yaml — standalone Redis 5, config mounted from ./conf
version: '2'
services:
  redis:
    image: redis:5
    container_name: redis
    hostname: redis
    restart: always
    environment:
      - TZ=Asia/Shanghai
    ports:
      - "6379:6379"
    volumes:
      - ./conf/redis.conf:/etc/redis/redis.conf  # config file (see redis.conf below)
      - ./data:/data                             # persistence directory
    # start the server with the mounted config instead of the image default
    command: redis-server /etc/redis/redis.conf
redis.conf
bind 0.0.0.0
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
requirepass root
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes

3: nacos
将application.properties放到当前目录下 nacos/conf/下
version: "2"
services:
nacos:
image: nacos/nacos-server:v2.0.4
container_name: nacos-container
volumes:
- ./nacos/standalone-logs/:/home/nacos/logs
- ./nacos/conf/application.properties:/home/nacos/conf/application.properties
ports:
- "8848:8848"
- "9848:9848"
- "9555:9555"
restart: always
application.properties
# Nacos external datasource — MySQL instead of embedded Derby
spring.datasource.platform=mysql
db.num=1
db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
db.user=root
db.password=root
nacos.naming.empty-service.auto-clean=true
nacos.naming.empty-service.clean.initial-delay-ms=50000
nacos.naming.empty-service.clean.period-time-ms=30000
# fixed typo: "cmcsosure" -> "exposure"; otherwise no actuator endpoints are exposed
management.endpoints.web.exposure.include=*
management.metrics.export.elastic.enabled=false
management.metrics.export.influx.enabled=false
server.tomcat.accesslog.enabled=true
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i
server.tomcat.basedir=
nacos.security.ignore.urls=/,/error,*.css,*.js,*.html,*.map,*.svg,*.png,*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**
nacos.core.auth.system.type=nacos
nacos.core.auth.enabled=false
nacos.core.auth.default.token.expire.seconds=18000
nacos.core.auth.default.token.secret.key=SecretKey012345678901234567890123456789012345678901234567890123456789
nacos.core.auth.caching.enabled=true
nacos.core.auth.enable.userAgentAuthWhite=false
nacos.core.auth.server.identity.key=serverIdentity
nacos.core.auth.server.identity.value=security
nacos.istio.mcp.server.enabled=false
4: kafka, zookeeper, kafka-manager
# docker-compose.yaml — single-broker Kafka + ZooKeeper + kafka-manager
# NOTE(review): 192.168.0.24 is the host's LAN address — adjust per environment
version: '3'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: zookeeper_container
    volumes:
      - ./zkdata:/data
    ports:
      - "2181:2181"
    restart: always
  kafka:
    image: wurstmeister/kafka
    container_name: kafka_container
    volumes:
      - ./kfdata:/kafka
    ports:
      - "39092:9092"
    environment:
      # fixed typo: "ConNECT" -> "CONNECT"; the broker cannot reach
      # zookeeper with a misspelled variable name
      - KAFKA_ZOOKEEPER_CONNECT=192.168.0.24:2181
      # address advertised to clients (must be reachable from outside the container)
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.0.24:9092
      - KAFKA_LISTENERS=PLAINTEXT://:9092
      - KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
    restart: always
  kafka-manager:
    image: kafkamanager/kafka-manager:2.0.0.2
    container_name: kafka-manager_container
    environment:
      ZK_HOSTS: 192.168.0.24:2181
    ports:
      - "19000:9000"



