Preface
The BT Panel (宝塔面板) is already installed on the server. In the panel's Docker section, choosing the Docker install option sets up both docker and docker-compose for you.
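Before going further, it is worth confirming that both tools are available on the command line. A quick check, assuming the panel installation described above:

docker -v            # prints the installed Docker version
docker-compose -v    # prints the installed docker-compose version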
1. docker-compose.yml Configuration
Create and edit the file in any directory:
vim docker-compose.yml
Paste in the following configuration:
version: '3.8'
services:
  rabbitmq:
    image: rabbitmq:3.7.25-management
    container_name: rabbitmq
    volumes:
      - /mydata/rabbitmq/data:/var/lib/rabbitmq
      - /mydata/rabbitmq/log:/var/log/rabbitmq
    ports:
      - 5672:5672
      - 15672:15672
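The configuration above keeps the image defaults, so the broker starts with the built-in guest/guest account. If you would rather set your own credentials, the official rabbitmq image supports the RABBITMQ_DEFAULT_USER and RABBITMQ_DEFAULT_PASS environment variables; a minimal sketch (the values below are placeholders, choose your own):

    environment:                          # optional, add under the rabbitmq service
      - RABBITMQ_DEFAULT_USER=admin       # placeholder username
      - RABBITMQ_DEFAULT_PASS=admin123    # placeholder password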
Once the file is saved, simply run the following command:
docker-compose -f docker-compose.yml up -d
Once installation finishes, the following page appears here:
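If you prefer the command line to the panel page, a quick way to confirm the container is healthy (run from the directory containing the compose file):

docker ps | grep rabbitmq     # the container should show an "Up" status
docker logs rabbitmq          # the log should end with a "startup complete" message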
2. Open the Ports in Your Security Group Rules
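In your cloud provider's security group, open the two ports mapped in the compose file: 5672 (AMQP, used by client applications) and 15672 (the management console). If the host itself also runs firewalld, a sketch of the matching local rules:

firewall-cmd --zone=public --add-port=5672/tcp --permanent
firewall-cmd --zone=public --add-port=15672/tcp --permanent
firewall-cmd --reload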
3. Access the RabbitMQ Management Console in a Browser
Check that everything is working properly.
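Open http://<server-ip>:15672 in a browser (replace <server-ip> with your server's public IP); unless you overrode the account above, the rabbitmq management image ships with the default guest/guest login. The management HTTP API offers the same check from the command line, a sketch assuming the default credentials:

curl -u guest:guest http://<server-ip>:15672/api/overview
# a JSON response containing the broker version means the node is up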
4. Providing Dependencies for Other Projects
Replace the contents of docker-compose.yml with the following:
version: '3.8'
services:
  nacos:
    restart: always
    image: nacos/nacos-server:1.4.2
    container_name: nacos
    environment:
      - MODE=standalone
    volumes:
      - /mydata/nacos/logs/:/home/nacos/logs
    ports:
      - 8848:8848
  spark-master:
    restart: always
    image: bitnami/spark:2.4.3
    container_name: spark-master
    user: root
    environment:
      - "SPARK_MODE=master"
      - "SPARK_RPC_AUTHENTICATION_ENABLED=no"
      - "SPARK_RPC_ENCRYPTION_ENABLED=no"
      - "SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no"
      - "SPARK_SSL_ENABLED=no"
    ports:
      - 8180:8080
      - 7077:7077
    volumes:
      - /mydata/python:/python
  spark-worker1:
    restart: always
    image: bitnami/spark:2.4.3
    container_name: spark-worker1
    user: root
    environment:
      - "SPARK_MODE=worker"
      - "SPARK_MASTER_URL=spark://spark-master:7077"
      - "SPARK_WORKER_MEMORY=1G"
      - "SPARK_WORKER_CORES=1"
      - "SPARK_RPC_AUTHENTICATION_ENABLED=no"
      - "SPARK_RPC_ENCRYPTION_ENABLED=no"
      - "SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no"
      - "SPARK_SSL_ENABLED=no"
  spark-worker2:
    restart: always
    image: bitnami/spark:2.4.3
    container_name: spark-worker2
    user: root
    environment:
      - "SPARK_MODE=worker"
      - "SPARK_MASTER_URL=spark://spark-master:7077"
      - "SPARK_WORKER_MEMORY=1G"
      - "SPARK_WORKER_CORES=1"
      - "SPARK_RPC_AUTHENTICATION_ENABLED=no"
      - "SPARK_RPC_ENCRYPTION_ENABLED=no"
      - "SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no"
  seata-server:
    container_name: seata-server
    restart: always
    image: seataio/seata-server:1.4.0
    environment:
      - "SEATA_IP=106.14.132.94"
      - "SEATA_CONFIG_NAME=file:/root/seata-config/registry"
    volumes:
      - /mydata/seataServer/conf:/root/seata-config
      - /mydata/seataServer/logs:/root/logs
    ports:
      - 8091:8091
  oap:
    image: apache/skywalking-oap-server:8.1.0-es7
    container_name: oap
    depends_on:
      - elasticsearch
    links:
      - elasticsearch
    restart: always
    ports:
      - 11800:11800
      - 12800:12800
    environment:
      - "SW_STORAGE=elasticsearch7"
      - "SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200"
      - "TZ=Asia/Shanghai"
  ui:
    image: apache/skywalking-ui:8.1.0
    container_name: ui
    depends_on:
      - oap
    links:
      - oap
    restart: always
    ports:
      - 8280:8080
    environment:
      - "SW_OAP_ADDRESS=oap:12800"
      - "TZ=Asia/Shanghai"
  kafka:
    restart: always
    image: wurstmeister/kafka
    container_name: kafka
    volumes:
      - /etc/localtime:/etc/localtime
    ports:
      - 9092:9092
    environment:
      - "KAFKA_ADVERTISED_HOST_NAME=106.14.132.94"
      - "KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181"
      - "KAFKA_ADVERTISED_PORT=9092"
      - "KAFKA_LOG_RETENTION_HOURS=120"
      - "KAFKA_MESSAGE_MAX_BYTES=10000000"
      - "KAFKA_REPLICA_FETCH_MAX_BYTES=10000000"
      - "KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS=60000"
      - "KAFKA_NUM_PARTITIONS=3"
      - "KAFKA_DELETE_RETENTION_MS=1000"
  kafka-manager:
    restart: always
    image: sheepkiller/kafka-manager
    container_name: kafka-manager
    environment:
      ZK_HOSTS: 106.14.132.94
    ports:
      - 9009:9000
  rabbitmq:
    restart: always
    image: rabbitmq:3.7.25-management
    container_name: rabbitmq
    volumes:
      - /mydata/rabbitmq/data:/var/lib/rabbitmq
      - /mydata/rabbitmq/log:/var/log/rabbitmq
    ports:
      - 5672:5672
      - 15672:15672
  elasticsearch:
    restart: always
    image: elasticsearch:7.9.0
    container_name: elasticsearch
    environment:
      - "cluster.name=elasticsearch"
      - "discovery.type=single-node"
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    volumes:
      - /mydata/elasticsearch/plugins:/usr/share/elasticsearch/plugins
      - /mydata/elasticsearch/data:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
      - 9300:9300
  kibana:
    restart: always
    image: kibana:6.4.0
    container_name: kibana
    links:
      - elasticsearch:es
    depends_on:
      - elasticsearch
    environment:
      - "elasticsearch.hosts=http://es:9200"
    ports:
      - 5601:5601
  logstash:
    restart: always
    image: logstash:6.4.0
    container_name: logstash
    volumes:
      - /mydata/logstash/filebeat-filter-console.conf:/usr/share/logstash/pipeline/filebeat-filter-console.conf
      - /mydata/logstash/filebeat-filter-es.conf:/usr/share/logstash/pipeline/filebeat-filter-es.conf
      - /mydata/logstash/filebeat-filter-print.conf:/usr/share/logstash/pipeline/filebeat-filter-print.conf
    depends_on:
      - elasticsearch
    links:
      - elasticsearch:es
    ports:
      - 4560:4560
  mongo:
    restart: always
    image: mongo:3.2
    container_name: mongo
    volumes:
      - /mydata/mongo/db:/data/db
    ports:
      - 27017:27017
  zookeeper:
    restart: always
    image: zookeeper:3.5
    container_name: zookeeper
    ports:
      - 2181:2181
    volumes:
      - /mydata/zookeeper/data:/data
      - /mydata/zookeeper/conf:/conf
  rocketmq:
    image: rocketmqinc/rocketmq
    container_name: rocketmq
    restart: always
    ports:
      - 9876:9876
    volumes:
      - /mydata/rocketmq/logs:/home/rocketmq/logs
      - /mydata/rocketmq/store:/home/rocketmq/store
    command: sh mqnamesrv
  broker:
    image: rocketmqinc/rocketmq
    container_name: rmqbroker
    restart: always
    ports:
      - 10909:10909
      - 10911:10911
      - 10912:10912
    volumes:
      - /mydata/rocketmq/logs:/home/rocketmq/logs
      - /mydata/rocketmq/store:/home/rocketmq/store
      - /mydata/rocketmq/conf/broker.conf:/opt/rocketmq-4.4.0/conf/broker.conf
    command: sh mqbroker -n rocketmq:9876 -c ../conf/broker.conf
    depends_on:
      - rocketmq
    environment:
      - JAVA_HOME=/usr/lib/jvm/jre
  console:
    image: styletang/rocketmq-console-ng
    container_name: rocketmq-console-ng
    restart: always
    ports:
      - 8076:8080
    depends_on:
      - rocketmq
    environment:
      - JAVA_OPTS= -Dlogging.level.root=info -Drocketmq.namesrv.addr=rocketmq:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false
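Every service above bind-mounts directories under /mydata on the host, and seata-server and logstash additionally expect their configuration files to already exist at the mounted paths. Creating the directories up front avoids missing-path and permission errors; a preparation sketch based on the paths in the compose file:

mkdir -p /mydata/nacos/logs /mydata/python \
         /mydata/seataServer/conf /mydata/seataServer/logs \
         /mydata/rabbitmq/data /mydata/rabbitmq/log \
         /mydata/elasticsearch/plugins /mydata/elasticsearch/data \
         /mydata/logstash /mydata/mongo/db \
         /mydata/zookeeper/data /mydata/zookeeper/conf \
         /mydata/rocketmq/logs /mydata/rocketmq/store /mydata/rocketmq/conf
chmod 777 /mydata/elasticsearch/data   # elasticsearch runs as a non-root user inside the container; tighten later if needed
docker-compose -f docker-compose.yml up -d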
After a successful start, it looks like the example in the figure below:
Summary
Tip: install Docker and docker-compose through the BT Panel, and a single command takes care of installing and configuring RabbitMQ.