Browse Source

Merge remote-tracking branch 'origin/master'

feature/dangjian
李鹏飞 6 years ago
parent
commit
44757b2669
  1. 3
      .gitmodules
  2. 52
      esua-epdc/doc/db/dev_youhua/esua_epdc_user.sql
  3. 14
      esua-epdc/docker-compose/jinshui/app/admin/docker-compose.yml
  4. 14
      esua-epdc/docker-compose/jinshui/app/api/docker-compose.yml
  5. 14
      esua-epdc/docker-compose/jinshui/app/auth/docker-compose.yml
  6. 14
      esua-epdc/docker-compose/jinshui/app/events/docker-compose.yml
  7. 14
      esua-epdc/docker-compose/jinshui/app/gateway/docker-compose.yml
  8. 14
      esua-epdc/docker-compose/jinshui/app/group/docker-compose.yml
  9. 14
      esua-epdc/docker-compose/jinshui/app/message/docker-compose.yml
  10. 14
      esua-epdc/docker-compose/jinshui/app/news/docker-compose.yml
  11. 14
      esua-epdc/docker-compose/jinshui/app/oss/docker-compose.yml
  12. 14
      esua-epdc/docker-compose/jinshui/app/user/docker-compose.yml
  13. 14
      esua-epdc/docker-compose/jinshui/app/websocket/docker-compose.yml
  14. 45
      esua-epdc/docker-compose/jinshui/node03/1-mysql/conf/mysql.conf.cnf
  15. 21
      esua-epdc/docker-compose/jinshui/node03/1-mysql/docker-compose.yml
  16. 20
      esua-epdc/docker-compose/jinshui/node03/2-nginx/docker-compose.yml
  17. 27
      esua-epdc/docker-compose/jinshui/node03/3-nacos/docker-compose.yml
  18. 17
      esua-epdc/docker-compose/jinshui/node03/4-redis/docker-compose.yml
  19. 9
      esua-epdc/docker-compose/jinshui/node03/4-redis/sentinel/conf/sentinel1.conf
  20. 18
      esua-epdc/docker-compose/jinshui/node03/4-redis/sentinel/docker-compose.yml
  21. 73
      esua-epdc/docker-compose/jinshui/node03/5-seata/conf/registry.conf
  22. 25
      esua-epdc/docker-compose/jinshui/node03/5-seata/docker-compose.yml
  23. 9
      esua-epdc/docker-compose/jinshui/node03/Readme.md
  24. 36
      esua-epdc/docker-compose/jinshui/node04/1-mysql/conf/mysql.conf.cnf
  25. 22
      esua-epdc/docker-compose/jinshui/node04/1-mysql/docker-compose.yml
  26. 50
      esua-epdc/docker-compose/jinshui/node04/2-nacos/docker-compose.yml
  27. 73
      esua-epdc/docker-compose/jinshui/node04/3-seata/conf/registry.conf
  28. 25
      esua-epdc/docker-compose/jinshui/node04/3-seata/docker-compose.yml
  29. 81
      esua-epdc/docker-compose/jinshui/node04/3-seata/script/config.txt
  30. 89
      esua-epdc/docker-compose/jinshui/node04/3-seata/script/nacos-config.sh
  31. 29
      esua-epdc/docker-compose/jinshui/node04/4-redis/docker-compose.yml
  32. 9
      esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/conf/sentinel1.conf
  33. 9
      esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/conf/sentinel2.conf
  34. 31
      esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/docker-compose.yml
  35. 8
      esua-epdc/docker-compose/jinshui/node04/Readme.md
  36. 39
      esua-epdc/docker-compose/jinshui/node04/fastdfs/Dockerfile
  37. 45
      esua-epdc/docker-compose/jinshui/node04/fastdfs/README.md
  38. 63
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/client.conf
  39. 29
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/http.conf
  40. 1065
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/mime.types
  41. 134
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/mod_fastdfs.conf
  42. 127
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/nginx.conf
  43. 287
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/storage.conf
  44. 278
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/tracker.conf
  45. 26
      esua-epdc/docker-compose/jinshui/node04/fastdfs/fastdfs.sh
  46. 47
      esua-epdc/docker-compose/jinshui/node04/fastdfs/nginx/docker-compose.yml
  47. 47
      esua-epdc/docker-compose/jinshui/node04/fastdfs/storage/docker-compose.yml
  48. 47
      esua-epdc/docker-compose/jinshui/node04/fastdfs/tracker/docker-compose.yml
  49. 14
      esua-epdc/docker-compose/prod/master/application/10.5.34.162-master/docker-compose.yml
  50. 50
      esua-epdc/docker-compose/prod/master/picture/10.5.34.166/docker-compose.yml
  51. 25
      esua-epdc/docker-compose/prod/node01/3-nacos/docker-compose.yml
  52. 73
      esua-epdc/docker-compose/prod/node01/4-seata/conf/registry.conf
  53. 39
      esua-epdc/docker-compose/prod/node01/4-seata/docker-compose.yml
  54. 84
      esua-epdc/docker-compose/prod/node01/4-seata/script/config.txt
  55. 89
      esua-epdc/docker-compose/prod/node01/4-seata/script/nacos-config.sh
  56. 48
      esua-epdc/docker-compose/prod/node02/2-nacos/docker-compose.yml
  57. 73
      esua-epdc/docker-compose/prod/node02/5-seata/conf/registry.conf
  58. 21
      esua-epdc/docker-compose/prod/node02/5-seata/docker-compose.yml
  59. 39
      esua-epdc/docker-compose/test/node01/1-mysql/conf/mysql.conf.cnf
  60. 1
      esua-epdc/docker-compose/test/node01/2-nginx/docker-compose.yml
  61. 1
      esua-epdc/docker-compose/test/node01/3-nacos/docker-compose.yml
  62. 3
      esua-epdc/docker-compose/test/node01/4-redis/docker-compose.yml
  63. 1
      esua-epdc/docker-compose/test/node01/5-seata/docker-compose.yml
  64. 35
      esua-epdc/docker-compose/test/node02/1-mysql/conf/mysql.conf.cnf
  65. 3
      esua-epdc/docker-compose/test/node02/1-mysql/docker-compose.yml
  66. 4
      esua-epdc/docker-compose/test/node02/2-nacos/docker-compose.yml
  67. 2
      esua-epdc/docker-compose/test/node02/3-seata/docker-compose.yml
  68. 6
      esua-epdc/docker-compose/test/node02/4-redis/docker-compose.yml
  69. 4
      esua-epdc/epdc-admin/epdc-admin-server/Dockerfile
  70. 16
      esua-epdc/epdc-admin/epdc-admin-server/pom.xml
  71. 2
      esua-epdc/epdc-admin/epdc-admin-server/src/main/resources/application-test.yml
  72. 4
      esua-epdc/epdc-auth/Dockerfile
  73. 6
      esua-epdc/epdc-auth/pom.xml
  74. 10
      esua-epdc/epdc-auth/src/main/resources/logback-spring.xml
  75. 6
      esua-epdc/epdc-commons/epdc-common-clienttoken/pom.xml
  76. 12
      esua-epdc/epdc-commons/epdc-common-clienttoken/src/main/java/com/elink/esua/epdc/common/token/dto/TokenDto.java
  77. 20
      esua-epdc/epdc-commons/epdc-common-clienttoken/src/main/java/com/elink/esua/epdc/common/token/util/CpUserDetailRedis.java
  78. 36
      esua-epdc/epdc-commons/epdc-commons-mybatis/src/main/java/com/elink/esua/epdc/commons/mybatis/entity/DeptScope.java
  79. 74
      esua-epdc/epdc-commons/epdc-commons-mybatis/src/main/java/com/elink/esua/epdc/commons/mybatis/utils/DeptEntityUtils.java
  80. 2
      esua-epdc/epdc-commons/epdc-commons-tools-wx-ma/pom.xml
  81. 12
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/NacosConfigConstant.java
  82. 1
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/NumConstant.java
  83. 13
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/PointsConstant.java
  84. 5
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/StrConstant.java
  85. 35
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/UserAuthTypeEnum.java
  86. 28
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/UserTagEnum.java
  87. 40
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsBehaviorCodeEnum.java
  88. 27
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsLimitTimeEnum.java
  89. 24
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsOperationEnum.java
  90. 34
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsOperationModeEnum.java
  91. 26
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsRuleAvailableEnum.java
  92. 36
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsRuleCodeEnum.java
  93. 28
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsUpperLimitEnum.java
  94. 271
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/utils/GPSUtils.java
  95. 36
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/utils/LocalDateUtils.java
  96. 246
      esua-epdc/epdc-gateway/pom.xml
  97. 69
      esua-epdc/epdc-gateway/src/main/java/com/elink/esua/epdc/config/CorsConfig.java
  98. 54
      esua-epdc/epdc-gateway/src/main/java/com/elink/esua/epdc/feign/ResourceFeignClient.java
  99. 35
      esua-epdc/epdc-gateway/src/main/java/com/elink/esua/epdc/feign/fallback/ResourceFeignClientFallback.java
  100. 154
      esua-epdc/epdc-gateway/src/main/java/com/elink/esua/epdc/filter/AuthFilter.java

3
.gitmodules

@@ -0,0 +1,3 @@
[submodule "esua-epdc/epdc-cloud-gateway-shibei"]
path = esua-epdc/epdc-cloud-gateway-shibei
url = http://121.42.41.42:7070/r/epdc-cloud-gateway-shibei.git

52
esua-epdc/doc/db/dev_youhua/esua_epdc_user.sql

@@ -0,0 +1,52 @@
--------线
ALTER TABLE esua_epdc_user.epdc_party_authentication_failed MODIFY COLUMN `STATE` VARCHAR(1) NULL COMMENT '状态 0-认证失败';
ALTER TABLE esua_epdc_user.epdc_party_authentication_failed MODIFY COLUMN `CADRE_FLAG` varchar(1) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT 0 NULL COMMENT '干部下沉标识 0-否,1-是';
ALTER TABLE esua_epdc_user.epdc_user_authenticate_history ADD AUTHENTICATED_TYPE varchar(2) NOT NULL COMMENT '认证类别(0-居民认证,1-党员认证,2-志愿者认证)';
------------------------------------------------
CREATE TABLE `epdc_handle_category` (
`ID` varchar(32) NOT NULL COMMENT '主键',
`CATEGORY_VAL` int(11) NOT NULL COMMENT '处理类别值',
`CATEGORY_LABEL` varchar(20) NOT NULL COMMENT '处理类别显示信息',
`AVAILABLE` varchar(1) NOT NULL DEFAULT '1' COMMENT '可用状态(0-不可用,1-可用)',
`SORT` int(11) NOT NULL DEFAULT '0' COMMENT '排序',
`REVISION` int(11) DEFAULT NULL COMMENT '乐观锁',
`CREATED_BY` varchar(32) DEFAULT NULL COMMENT '创建人',
`CREATED_TIME` datetime DEFAULT NULL COMMENT '创建时间',
`UPDATED_BY` varchar(32) DEFAULT NULL COMMENT '更新人',
`UPDATED_TIME` datetime DEFAULT NULL COMMENT '更新时间',
`DEL_FLAG` varchar(1) NOT NULL COMMENT '删除标识 0:未删除,1:已删除',
`CATEGORY_CODE` varchar(32) NOT NULL COMMENT '处理类型编码',
PRIMARY KEY (`ID`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='处理类别表';
CREATE TABLE `epdc_role_category` (
`ID` varchar(32) NOT NULL COMMENT '主键',
`ROLE_ID` varchar(32) NOT NULL COMMENT '角色ID',
`CATEGORY_ID` varchar(32) NOT NULL COMMENT '处理类别ID',
`REVISION` int(11) DEFAULT NULL COMMENT '乐观锁',
`CREATED_BY` varchar(32) DEFAULT NULL COMMENT '创建人',
`CREATED_TIME` datetime DEFAULT NULL COMMENT '创建时间',
`UPDATED_BY` varchar(32) DEFAULT NULL COMMENT '更新人',
`UPDATED_TIME` datetime DEFAULT NULL COMMENT '更新时间',
`DEL_FLAG` varchar(1) NOT NULL COMMENT '删除标识 0:未删除,1:已删除',
PRIMARY KEY (`ID`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='角色和处理类别关系表';
ALTER TABLE EPDC_HANDLE_ROLE_CATEGORY COMMENT '角色和处理类别关系表';
CREATE TABLE EPDC_HANDLE_ROLE_DEPT(
ID VARCHAR(32) NOT NULL COMMENT '主键' ,
ROLE_ID VARCHAR(32) NOT NULL COMMENT '角色ID' ,
DEPT_ID VARCHAR(32) NOT NULL COMMENT '部门ID' ,
DEPT_TYPE VARCHAR(50) NOT NULL COMMENT '部门机构类型' ,
REVISION INT COMMENT '乐观锁' ,
CREATED_BY VARCHAR(32) COMMENT '创建人' ,
CREATED_TIME DATETIME COMMENT '创建时间' ,
UPDATED_BY VARCHAR(32) COMMENT '更新人' ,
UPDATED_TIME DATETIME COMMENT '更新时间' ,
PRIMARY KEY (ID)
) COMMENT = '处理部门角色权限表';
ALTER TABLE EPDC_HANDLE_ROLE_DEPT COMMENT '处理部门角色权限表';

14
esua-epdc/docker-compose/jinshui/app/admin/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-admin-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-admin-server:prod
container_name: epdc-admin-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.41
volumes:
- /mnt/epdc/app/admin/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/api/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-api-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-api-server:prod
container_name: epdc-api-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.42
volumes:
- /mnt/epdc/app/api/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/auth/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-auth:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-auth:prod
container_name: epdc-auth-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.43
volumes:
- /mnt/epdc/app/auth/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/events/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-events-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-events-server:prod
container_name: epdc-events-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.44
volumes:
- /mnt/epdc/app/events/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/gateway/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-gateway:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-gateway:prod
container_name: epdc-gateway-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.40
volumes:
- /mnt/epdc/app/gateway/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/group/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-group-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-group-server:prod
container_name: epdc-group-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.45
volumes:
- /mnt/epdc/app/group/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/message/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-message-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-message-server:prod
container_name: epdc-message-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.46
volumes:
- /mnt/epdc/app/message/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/news/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-news-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-news-server:prod
container_name: epdc-news-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.47
volumes:
- /mnt/epdc/app/news/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/oss/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-oss-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-oss-server:prod
container_name: epdc-oss-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.48
volumes:
- /mnt/epdc/app/oss/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/user/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-user-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-user-server:prod
container_name: epdc-user-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.49
volumes:
- /mnt/epdc/app/user/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/websocket/docker-compose.yml

@@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-websocket-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-websocket-server:prod
container_name: epdc-websocket-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.50
volumes:
- /mnt/epdc/app/websocket/logs:/logs
networks:
epdc_network:
external: true

45
esua-epdc/docker-compose/jinshui/node03/1-mysql/conf/mysql.conf.cnf

@@ -0,0 +1,45 @@
[client]
default-character-set=utf8
[mysqld]
character-set-server=utf8
##### 这两部操作可以解决mysql连接很慢的问题 #####
# 根据官方文档说明,如果你的mysql主机查询DNS很慢或是有很多客户端主机时会导致连接很慢,由于我们的开发机器是不能够连接外网的,
# 所以DNS解析是不可能完成的,从而也就明白了为什么连接那么慢了。同时,请注意在增加该配置参数后,mysql的授权表中的host字段就
# 不能够使用域名而只能够使用 ip地址了,因为这是禁止了域名解析的结果。
# 1.禁止域名解析
skip-host-cache
# 2.禁用dns解析,但是,这样不能在mysql的授权表中使用主机名了,只能使用IP。
skip-name-resolve
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分
server-id = 21
# mysql日志
log_bin = /var/lib/mysql/mysql-bin.log
log-bin-index=slave-relay-bin.index
#日志记录的格式
binlog_format=MIXED
#单个日志文件最大
max_binlog_size = 512M 
#从库建议开启,有利于数据一致性
relay_log_recovery = 1   
#如果从库还会用做主库,建议开启
log_slave_updates = 1   
# 中继日志:存储所有主库TP过来的binlog事件主库binlog:记录主库发生过的修改事件
# relay-log = /var/lib/mysql/mysql-relay-bin.log
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION

21
esua-epdc/docker-compose/jinshui/node03/1-mysql/docker-compose.yml

@@ -0,0 +1,21 @@
version: "3.7"
services:
mysql-slave:
container_name: mysql-slave
image: mysql:5.7
environment:
TZ: Asia/Shanghai
MYSQL_ROOT_PASSWORD: epdc!elink1405
MYSQL_LOWER_CASE_TABLE_NAMES: 1
volumes:
- /etc/localtime:/etc/localtime
- /etc/timezone:/etc/timezone
- /mnt/epdc/mysql/data:/var/lib/mysql
- /mnt/epdc/mysql/conf/mysql.conf.cnf:/etc/mysql/conf.d/mysql.conf.cnf
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.3
networks:
epdc_network:
external: true

20
esua-epdc/docker-compose/jinshui/node03/2-nginx/docker-compose.yml

@@ -0,0 +1,20 @@
version: "3.7"
services:
web:
image: nginx
ports:
- 80:80
- 443:443
volumes:
- /mnt/epdc/nginx/html:/usr/share/nginx/html
- /mnt/epdc/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro
- /mnt/epdc/nginx/conf.d:/etc/nginx/conf.d:ro
- /mnt/epdc/nginx/logs:/var/log/nginx
restart: always
container_name: nginx_master
networks:
epdc_network:
ipv4_address: 172.20.0.4
networks:
epdc_network:
external: true

27
esua-epdc/docker-compose/jinshui/node03/3-nacos/docker-compose.yml

@@ -0,0 +1,27 @@
version: "3.7"
services:
nacos3:
image: nacos/nacos-server:latest
container_name: nacos3
networks:
epdc_network:
ipv4_address: 172.20.0.5
volumes:
- /mnt/epdc/nacos/logs:/home/nacos/logs
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_IP: 172.20.0.5 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点
MYSQL_MASTER_SERVICE_PORT: 3306
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos
MYSQL_MASTER_SERVICE_USER: nacos
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3
MYSQL_SLAVE_SERVICE_PORT: 3306
restart: always
networks:
epdc_network:
external: true

17
esua-epdc/docker-compose/jinshui/node03/4-redis/docker-compose.yml

@@ -0,0 +1,17 @@
version: '3.7'
services:
slave2:
image: redis
container_name: redis-slave-2
command: redis-server --slaveof 172.19.0.11 6379 --requirepass epdc!redis@slave1405 --masterauth epdc!redis@master1405 --logfile /data/log/redis-slave2.log
restart: always
volumes:
- /mnt/epdc/redis/log:/data/log
- /mnt/epdc/redis/data:/data
networks:
epdc_network:
ipv4_address: 172.20.0.11
networks:
epdc_network:
external: true

9
esua-epdc/docker-compose/jinshui/node03/4-redis/sentinel/conf/sentinel1.conf

@@ -0,0 +1,9 @@
port 26379
logfile "/usr/local/redis/sentinel/log/sentinel.log"
dir "/usr/local/redis/sentinel"
sentinel monitor epdcmaster 172.19.0.11 6379 2
sentinel down-after-milliseconds epdcmaster 30000
sentinel parallel-syncs epdcmaster 1
sentinel failover-timeout epdcmaster 180000
sentinel deny-scripts-reconfig yes
sentinel auth-pass epdcmaster epdc!redis@master1405

18
esua-epdc/docker-compose/jinshui/node03/4-redis/sentinel/docker-compose.yml

@@ -0,0 +1,18 @@
version: '3.7'
services:
sentinel2:
image: redis
container_name: redis-sentinel-2
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- /mnt/epdc/redis/sentinel/conf/sentinel1.conf:/usr/local/etc/redis/sentinel.conf
- /mnt/epdc/redis/sentinel/data:/data
- /mnt/epdc/redis/sentinel/log:/usr/local/redis/sentinel/log
- /mnt/epdc/redis/sentinel/dir:/usr/local/redis/sentinel
networks:
epdc_network:
ipv4_address: 172.20.0.12
networks:
epdc_network:
external: true

73
esua-epdc/docker-compose/jinshui/node03/5-seata/conf/registry.conf

@@ -0,0 +1,73 @@
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = "0"
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}

25
esua-epdc/docker-compose/prod/master/db/10.5.34.164/docker-compose.yml → esua-epdc/docker-compose/jinshui/node03/5-seata/docker-compose.yml

@@ -21,15 +21,20 @@
version: "3.7"
services:
seata-server:
seata-server2:
container_name: seata-server2
image: seataio/seata-server:latest
hostname: seata-server
ports:
- 9101:8091
environment:
- SEATA_PORT=8091
- SEATA_IP=10.5.34.164
- STORE_MODE=db
- SERVER_NODE=1
expose:
- 8091
SEATA_IP: 172.20.0.21
SEATA_PORT: 8091
STORE_MODE: db
SERVER_NODE: 2
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /mnt/epdc/seata/seata-config:/root/seata-config
networks:
epdc_network:
ipv4_address: 172.20.0.21
networks:
epdc_network:
external: true

9
esua-epdc/docker-compose/jinshui/node03/Readme.md

@@ -0,0 +1,9 @@
1. 创建网络:
```
docker network create -d bridge --subnet 172.20.0.0/24 epdc_network
```
2. 执行1-mysql中的docker-compose.yml
3. 执行3-nacos中的docker-compose.yml
https://github.com/alibaba/nacos/blob/master/distribution/conf/nacos-mysql.sql

36
esua-epdc/docker-compose/jinshui/node04/1-mysql/conf/mysql.conf.cnf

@@ -0,0 +1,36 @@
[client]
default-character-set=utf8
[mysqld]
character-set-server=utf8
##### 这两部操作可以解决mysql连接很慢的问题 #####
# 根据官方文档说明,如果你的mysql主机查询DNS很慢或是有很多客户端主机时会导致连接很慢,由于我们的开发机器是不能够连接外网的,
# 所以DNS解析是不可能完成的,从而也就明白了为什么连接那么慢了。同时,请注意在增加该配置参数后,mysql的授权表中的host字段就
# 不能够使用域名而只能够使用 ip地址了,因为这是禁止了域名解析的结果。
# 1.禁止域名解析
skip-host-cache
# 2.禁用dns解析,但是,这样不能在mysql的授权表中使用主机名了,只能使用IP。
skip-name-resolve
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分
server-id = 11
# mysql日志
log_bin = /var/lib/mysql/master-bin.log
log-bin-index=master-bin.index
# binlog日志格式,mysql默认采用statement,建议使用 mixed(是statement和row模式的结合)
binlog_format = mixed
#单个日志文件最大
max_binlog_size = 512M
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION

22
esua-epdc/docker-compose/jinshui/node04/1-mysql/docker-compose.yml

@@ -0,0 +1,22 @@
version: "3.7"
services:
mysql-master:
container_name: mysql-master
image: mysql:5.7
environment:
TZ: Asia/Shanghai
MYSQL_ROOT_PASSWORD: epdc!elink1405
MYSQL_LOWER_CASE_TABLE_NAMES: 1
volumes:
- /etc/localtime:/etc/localtime
- /etc/timezone:/etc/timezone
- /mnt/epdc/mysql/data:/var/lib/mysql
- /mnt/epdc/mysql/conf/mysql.conf.cnf:/etc/mysql/conf.d/mysql.conf.cnf
restart: always
networks:
epdc_network:
ipv4_address: 172.19.0.2
networks:
epdc_network:
external: true

50
esua-epdc/docker-compose/jinshui/node04/2-nacos/docker-compose.yml

@@ -0,0 +1,50 @@
version: "3.7"
services:
nacos1:
image: nacos/nacos-server:latest
container_name: nacos1
networks:
epdc_network:
ipv4_address: 172.19.0.3
volumes:
- /mnt/epdc/nacos/logs/nacos1:/home/nacos/logs
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_IP: 172.19.0.3 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点
MYSQL_MASTER_SERVICE_PORT: 3306
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos
MYSQL_MASTER_SERVICE_USER: nacos
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3
MYSQL_SLAVE_SERVICE_PORT: 3306
restart: on-failure
nacos2:
image: nacos/nacos-server:latest
container_name: nacos2
networks:
epdc_network:
ipv4_address: 172.19.0.4
volumes:
- /mnt/epdc/nacos/logs/nacos2:/home/nacos/logs
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_IP: 172.19.0.4 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点
MYSQL_MASTER_SERVICE_PORT: 3306
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos
MYSQL_MASTER_SERVICE_USER: nacos
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3
MYSQL_SLAVE_SERVICE_PORT: 3306
restart: always
networks:
epdc_network:
external: true

73
esua-epdc/docker-compose/jinshui/node04/3-seata/conf/registry.conf

@@ -0,0 +1,73 @@
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = "0"
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}

25
esua-epdc/docker-compose/prod/master/db/10.5.34.166/docker-compose.yml → esua-epdc/docker-compose/jinshui/node04/3-seata/docker-compose.yml

@@ -21,15 +21,20 @@
version: "3.7"
services:
seata-server:
seata-server1:
container_name: seata-server1
image: seataio/seata-server:latest
hostname: seata-server
ports:
- 9101:8091
environment:
- SEATA_PORT=8091
- SEATA_IP=10.5.34.166
- STORE_MODE=db
- SERVER_NODE=1
expose:
- 8091
SEATA_IP: 172.19.0.21
SEATA_PORT: 8091
STORE_MODE: db
SERVER_NODE: 1
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /mnt/epdc/seata/seata-config:/root/seata-config
networks:
epdc_network:
ipv4_address: 172.19.0.21
networks:
epdc_network:
external: true

81
esua-epdc/docker-compose/jinshui/node04/3-seata/script/config.txt

@@ -0,0 +1,81 @@
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enable-client-batch-send-request=false
transport.thread-factory.boss-thread-prefix=NettyBoss
transport.thread-factory.worker-thread-prefix=NettyServerNIOWorker
transport.thread-factory.server-executor-thread-prefix=NettyServerBizHandler
transport.thread-factory.share-boss-worker=false
transport.thread-factory.client-selector-thread-prefix=NettyClientSelector
transport.thread-factory.client-selector-thread-size=1
transport.thread-factory.client-worker-thread-prefix=NettyClientWorkerThread
transport.thread-factory.boss-thread-size=1
transport.thread-factory.worker-thread-size=8
transport.shutdown.wait=3
service.vgroup_mapping.my_test_tx_group=default
service.vgroup_mapping.epdc-api-server-fescar-service-group=default
service.vgroup_mapping.epdc-demo-server-fescar-service-group=default
service.vgroup_mapping.epdc-user-server-fescar-service-group=default
service.vgroup_mapping.epdc-services-server-fescar-service-group=default
service.vgroup_mapping.epdc-party-server-fescar-service-group=default
service.vgroup_mapping.epdc-heart-server-fescar-service-group=default
service.vgroup_mapping.epdc-neighbor-server-fescar-service-group=default
service.vgroup_mapping.epdc-oss-server-fescar-service-group=default
service.vgroup_mapping.epdc-message-server-fescar-service-group=default
service.vgroup_mapping.epdc-news-server-fescar-service-group=default
service.vgroup_mapping.epdc-job-server-fescar-service-group=default
service.vgroup_mapping.epdc-admin-server-fescar-service-group=default
service.vgroup_mapping.epdc-activiti-server-fescar-service-group=default
service.vgroup_mapping.epdc-kpi-server-fescar-service-group=default
service.vgroup_mapping.epdc-points-server-fescar-service-group=default
service.vgroup_mapping.epdc-webservice-server-fescar-service-group=default
service.vgroup_mapping.epdc-events-server-fescar-service-group=default
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.async.commit.buffer.limit=10000
client.rm.lock.retry.internal=10
client.rm.lock.retry.times=30
client.rm.report.retry.count=5
client.rm.lock.retry.policy.branch-rollback-on-conflict=true
client.rm.table.meta.check.enable=false
client.rm.report.success.enable=true
client.tm.commit.retry.count=5
client.tm.rollback.retry.count=5
store.mode=db
store.file.dir=file_store/data
store.file.max-branch-session-size=16384
store.file.max-global-session-size=512
store.file.file-write-buffer-cache-size=16384
store.file.flush-disk-mode=async
store.file.session.reload.read_size=100
store.db.datasource=dbcp
store.db.db-type=mysql
store.db.driver-class-name=com.mysql.jdbc.Driver
store.db.url=jdbc:mysql://172.19.0.2:3306/seata?useUnicode=true
store.db.user=epdc
store.db.password=Elink@833066
store.db.min-conn=1
store.db.max-conn=3
store.db.global.table=global_table
store.db.branch.table=branch_table
store.db.query-limit=100
store.db.lock-table=lock_table
server.recovery.committing-retry-period=1000
server.recovery.asyn-committing-retry-period=1000
server.recovery.rollbacking-retry-period=1000
server.recovery.timeout-retry-period=1000
server.max.commit.retry.timeout=-1
server.max.rollback.retry.timeout=-1
server.rollback.retry.timeout.unlock.enable=false
client.undo.data.validation=true
client.undo.log.serialization=jackson
server.undo.log.save.days=7
server.undo.log.delete.period=86400000
client.undo.log.table=undo_log
client.log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registry-type=compact
metrics.exporter-list=prometheus
metrics.exporter-prometheus-port=9898

89
esua-epdc/docker-compose/jinshui/node04/3-seata/script/nacos-config.sh

@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Copyright 1999-2019 Seata.io Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Pushes every key=value pair found in ../config.txt into the Nacos
# configuration center, so seata-server can read its settings from there.
# Usage: nacos-config.sh [-h host] [-p port] [-g group] [-t tenant]
while getopts ":h:p:g:t:" opt
do
  case $opt in
  h)
    host=$OPTARG
    ;;
  p)
    port=$OPTARG
    ;;
  g)
    group=$OPTARG
    ;;
  t)
    tenant=$OPTARG
    ;;
  ?)
    # 'echo -e' is required: without it bash prints the ANSI color
    # escape sequences literally instead of interpreting them.
    echo -e "\033[31m USAGE OPTION: $0 [-h host] [-p port] [-g group] [-t tenant] \033[0m"
    exit 1
    ;;
  esac
done

# Defaults match a local single-node Nacos installation.
if [[ -z ${host} ]]; then
  host=localhost
fi
if [[ -z ${port} ]]; then
  port=8848
fi
if [[ -z ${group} ]]; then
  group="SEATA_GROUP"
fi
if [[ -z ${tenant} ]]; then
  tenant=""
fi

nacosAddr=$host:$port
contentType="content-type:application/json;charset=UTF-8"
echo "set nacosAddr=$nacosAddr"
echo "set group=$group"

failCount=0
tempLog=$(mktemp -u)

# addConfig <key> <value>
# Publishes one entry via the Nacos Open API and reports the outcome.
# A successful publish returns the literal string "true"; an empty
# response body means the server could not be reached at all.
function addConfig() {
  curl -X POST -H "${contentType}" "http://${nacosAddr}/nacos/v1/cs/configs?dataId=$1&group=$group&content=$2&tenant=$tenant" >"${tempLog}" 2>/dev/null
  if [[ -z $(cat "${tempLog}") ]]; then
    echo -e "\033[31m Please check the cluster status. \033[0m"
    exit 1
  fi
  if [[ $(cat "${tempLog}") =~ "true" ]]; then
    echo -e "Set $1=$2\033[32m successfully \033[0m"
  else
    echo -e "Set $1=$2\033[31m failure \033[0m"
    (( failCount++ ))
  fi
}

count=0
# Read config.txt line by line instead of word-splitting the output of
# `cat`, so values containing spaces survive intact. Blank lines and
# '#' comments are skipped.
while IFS= read -r line; do
  if [[ -z "$line" || "$line" == \#* ]]; then
    continue
  fi
  (( count++ ))
  key=${line%%=*}
  value=${line#*=}
  addConfig "${key}" "${value}"
done < "$(dirname "$PWD")/config.txt"

echo "========================================================================="
echo -e " Complete initialization parameters, \033[32m total-count:$count \033[0m, \033[31m failure-count:$failCount \033[0m"
echo "========================================================================="

if [[ ${failCount} -eq 0 ]]; then
  echo -e "\033[32m Init nacos config finished, please start seata-server. \033[0m"
else
  echo -e "\033[31m init nacos config fail. \033[0m"
fi

29
esua-epdc/docker-compose/jinshui/node04/4-redis/docker-compose.yml

@ -0,0 +1,29 @@
version: '3.7'
services:
master:
image: redis
container_name: redis-master
command: redis-server --requirepass epdc!redis@master1405 --logfile /data/log/redis-master.log
restart: always
volumes:
- /mnt/epdc/redis/log:/data/log
- /mnt/epdc/redis/data:/data
networks:
epdc_network:
ipv4_address: 172.19.0.11
slave1:
image: redis
container_name: redis-slave-1
command: redis-server --slaveof 172.19.0.11 6379 --requirepass epdc!redis@slave1405 --masterauth epdc!redis@master1405 --logfile /data/log/redis-slave1.log
restart: always
volumes:
- /mnt/epdc/redis/log:/data/log
- /mnt/epdc/redis/data:/data
networks:
epdc_network:
ipv4_address: 172.19.0.12
networks:
epdc_network:
external: true

9
esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/conf/sentinel1.conf

@ -0,0 +1,9 @@
port 26379
logfile "/usr/local/redis/sentinel/log/sentinel.log"
dir "/usr/local/redis/sentinel"
sentinel monitor epdcmaster 172.19.0.11 6379 2
sentinel down-after-milliseconds epdcmaster 30000
sentinel parallel-syncs epdcmaster 1
sentinel failover-timeout epdcmaster 180000
sentinel deny-scripts-reconfig yes
sentinel auth-pass epdcmaster epdc!redis@master1405

9
esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/conf/sentinel2.conf

@ -0,0 +1,9 @@
port 26379
logfile "/usr/local/redis/sentinel/log/sentinel.log"
dir "/usr/local/redis/sentinel"
sentinel monitor epdcmaster 172.19.0.11 6379 2
sentinel down-after-milliseconds epdcmaster 30000
sentinel parallel-syncs epdcmaster 1
sentinel failover-timeout epdcmaster 180000
sentinel deny-scripts-reconfig yes
sentinel auth-pass epdcmaster epdc!redis@master1405

31
esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/docker-compose.yml

@ -0,0 +1,31 @@
version: '3.7'
services:
sentinel1:
image: redis
container_name: redis-sentinel-1
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- /mnt/epdc/redis/sentinel/conf/sentinel1.conf:/usr/local/etc/redis/sentinel.conf
- /mnt/epdc/redis/sentinel/data:/data
- /mnt/epdc/redis/sentinel/log:/usr/local/redis/sentinel/log
- /mnt/epdc/redis/sentinel/dir:/usr/local/redis/sentinel
networks:
epdc_network:
ipv4_address: 172.19.0.13
sentinel3:
image: redis
container_name: redis-sentinel-3
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- /mnt/epdc/redis/sentinel/conf/sentinel2.conf:/usr/local/etc/redis/sentinel.conf
- /mnt/epdc/redis/sentinel/data2:/data
- /mnt/epdc/redis/sentinel/log2:/usr/local/redis/sentinel/log
- /mnt/epdc/redis/sentinel/dir2:/usr/local/redis/sentinel
networks:
epdc_network:
ipv4_address: 172.19.0.14
networks:
epdc_network:
external: true

8
esua-epdc/docker-compose/jinshui/node04/Readme.md

@ -0,0 +1,8 @@
1. 创建网络:
```
docker network create -d bridge --subnet 172.19.0.0/24 epdc_network
```
2. 执行1-mysql中的docker-compose.yml
3. 执行2-nacos中的docker-compose.yml

39
esua-epdc/docker-compose/jinshui/node04/fastdfs/Dockerfile

@ -0,0 +1,39 @@
# FastDFS tracker/storage image on CentOS 7, with fastdfs-nginx-module
# compiled into nginx 1.15.4.
FROM centos:7
# Configuration files baked into the image; /home/fastdfs.sh patches the
# placeholder IP into them at container start (see ENTRYPOINT).
# COPY is preferred over ADD for plain local files (Dockerfile best practice);
# destinations consistently end with '/' to mark them as directories.
COPY conf/client.conf /etc/fdfs/
COPY conf/http.conf /etc/fdfs/
COPY conf/mime.types /etc/fdfs/
COPY conf/storage.conf /etc/fdfs/
COPY conf/tracker.conf /etc/fdfs/
COPY fastdfs.sh /home
COPY conf/nginx.conf /etc/fdfs/
COPY conf/mod_fastdfs.conf /etc/fdfs/
# Build libfastcommon, fastdfs and nginx (with the fastdfs module) in a
# single layer; remove the downloaded tarball and yum caches so they do
# not bloat the image.
RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl-devel wget vim -y \
    && cd /usr/local/src \
    && git clone https://github.com/happyfish100/libfastcommon.git --depth 1 \
    && git clone https://github.com/happyfish100/fastdfs.git --depth 1 \
    && git clone https://github.com/happyfish100/fastdfs-nginx-module.git --depth 1 \
    && wget http://nginx.org/download/nginx-1.15.4.tar.gz \
    && tar -zxvf nginx-1.15.4.tar.gz \
    && rm -f nginx-1.15.4.tar.gz \
    && mkdir /home/dfs \
    && cd libfastcommon/ \
    && ./make.sh && ./make.sh install \
    && cd ../fastdfs/ \
    && ./make.sh && ./make.sh install \
    && cd ../nginx-1.15.4/ \
    && ./configure --add-module=/usr/local/src/fastdfs-nginx-module/src/ \
    && make && make install \
    && yum clean all \
    && chmod +x /home/fastdfs.sh
# Expose the config directory so it can be overridden with a bind mount.
VOLUME /etc/fdfs
# 22122 tracker, 23000 storage, 8888 file download via nginx, 80 nginx default.
EXPOSE 22122 23000 8888 80
ENTRYPOINT ["/home/fastdfs.sh"]

45
esua-epdc/docker-compose/jinshui/node04/fastdfs/README.md

@ -0,0 +1,45 @@
# FastDFS Dockerfile network (网络版本)
## 声明
其实并没什么区别 教程是在上一位huayanYu(小锅盖)和 Wiki的作者 的基础上进行了一些修改,本质上还是huayanYu(小锅盖) 和 Wiki 上的作者写的教程
## 目录介绍
### conf
Dockerfile 所需要的一些配置文件
当然你也可以对这些文件进行一些修改,比如 storage.conf 里面的 base_path 等相关配置
## 使用方法
需要注意的是,你需要在运行容器的时候指定宿主机的 IP,用参数 FASTDFS_IPADDR 来指定
```
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast 镜像id/镜像名称
```
## 后记
本质上 local 版本与 network 版本无区别
## Statement
In fact there is no real difference: this tutorial makes some modifications on top of the earlier work of huayanYu and of the Wiki's author, so in essence it is still their tutorial.
## Catalogue introduction
### conf
Dockerfile Some configuration files needed
Of course, you can also make some modifications to these files, such as base_path in storage.conf, etc.
## Usage method
Note that you need to specify the host IP when running the container with the parameter FASTDFS_IPADDR
Here's a sample docker run instruction
```
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast 镜像id/镜像名称
```
## Epilogue
Essentially, there is no difference between the local version and the network version.

63
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/client.conf

@ -0,0 +1,63 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=30
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store log files
base_path=/home/dfs
# tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=172.19.0.30:22122
tracker_server=172.20.0.30:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port=80
#use "#include" directive to include HTTP other settiongs
##include http.conf

29
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/http.conf

@ -0,0 +1,29 @@
# HTTP default content type
http.default_content_type = application/octet-stream
# MIME types mapping filename
# MIME types file format: MIME_type extensions
# such as: image/jpeg jpeg jpg jpe
# you can use apache's MIME file: mime.types
http.mime_types_filename=mime.types
# if use token to anti-steal
# default value is false (0)
http.anti_steal.check_token=false
# token TTL (time to live), seconds
# default value is 600
http.anti_steal.token_ttl=900
# secret key to generate anti-steal token
# this parameter must be set when http.anti_steal.check_token set to true
# the length of the secret key should not exceed 128 bytes
http.anti_steal.secret_key=FastDFS1234567890
# return the content of the file when check token fail
# default value is empty (no file sepecified)
http.anti_steal.token_check_fail=/home/yuqing/fastdfs/conf/anti-steal.jpg
# if support multi regions for HTTP Range
# default value is true
http.multi_range.enabed = true

1065
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/mime.types

File diff suppressed because it is too large

134
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/mod_fastdfs.conf

@ -0,0 +1,134 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=2
# network recv and send timeout in seconds
# default value is 30s
network_timeout=30
# the base path to store log files
base_path=/tmp
# if load FastDFS parameters from tracker server
# since V1.12
# default value is false
load_fdfs_parameters_from_tracker=true
# storage sync file max delay seconds
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.12
# default value is 86400 seconds (one day)
storage_sync_file_max_delay = 86400
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V1.13
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.13
storage_ids_filename = storage_ids.conf
# FastDFS tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
# valid only when load_fdfs_parameters_from_tracker is true
tracker_server=172.19.0.30:22122
tracker_server=172.20.0.30:22122
# the port of the local storage server
# the default value is 23000
storage_server_port=23000
# the group name of the local storage server
group_name=group1
# if the url / uri including the group name
# set to false when uri like /M00/00/00/xxx
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx
# default value is false
url_have_group_name = true
# path(disk or mount point) count, default value is 1
# must same as storage.conf
store_path_count=1
# store_path#, based 0, if store_path0 not exists, it's value is base_path
# the paths must be exist
# must same as storage.conf
store_path0=/home/dfs
#store_path1=/home/yuqing/fastdfs1
# standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log
# empty for output to stderr (apache and nginx error_log file)
log_filename=
# response mode when the file not exist in the local file system
## proxy: get the content from other storage server, then send to client
## redirect: redirect to the original storage server (HTTP Header is Location)
response_mode=proxy
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# this paramter used to get all ip address of the local host
# default values is empty
if_alias_prefix=
# use "#include" directive to include HTTP config file
# NOTE: #include is an include directive, do NOT remove the # before include
#include http.conf
# if support flv
# default value is false
# since v1.15
flv_support = true
# flv file extension name
# default value is flv
# since v1.15
flv_extension = flv
# set the group count
# set to none zero to support multi-group on this storage server
# set to 0 for single group only
# groups settings section as [group1], [group2], ..., [groupN]
# default value is 0
# since v1.14
group_count = 0
# group settings for group #1
# since v1.14
# when support multi-group on this storage server, uncomment following section
#[group1]
#group_name=group1
#storage_server_port=23000
#store_path_count=2
#store_path0=/home/yuqing/fastdfs
#store_path1=/home/yuqing/fastdfs1
# group settings for group #2
# since v1.14
# when support multi-group, uncomment following section as neccessary
#[group2]
#group_name=group2
#storage_server_port=23000
#store_path_count=1
#store_path0=/home/yuqing/fastdfs

127
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/nginx.conf

@ -0,0 +1,127 @@
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
server {
listen 8888;
server_name localhost;
location ~/group[0-9]/ {
ngx_fastdfs_module;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}

287
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/storage.conf

@ -0,0 +1,287 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# the name of the group this storage server belongs to
#
# comment or remove this item for fetching from tracker server,
# in this case, use_storage_id must set to true in tracker.conf,
# and storage_ids.conf must be configed correctly.
group_name=epdcFile
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# if bind an address of this host when connect to other servers
# (this storage server as a client)
# true for binding the address configed by above parameter: "bind_addr"
# false for binding any address of this host
client_bind=true
# the storage server port
port=23000
# connect timeout in seconds
# default value is 30s
connect_timeout=10
# network timeout in seconds
# default value is 30s
network_timeout=60
# heart beat interval in seconds
heart_beat_interval=30
# disk usage report interval in seconds
stat_report_interval=60
# the base path to store data and log files
base_path=/home/dfs
# max concurrent connections the server supported
# default value is 256
# more max_connections means more memory will be used
# you should set this parameter larger, eg. 10240
max_connections=1024
# the buff size to recv / send data
# this parameter must more than 8KB
# default value is 64KB
# since V2.00
buff_size = 256KB
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# work thread deal network io
# default value is 4
# since V2.00
work_threads=4
# if disk read / write separated
## false for mixed read and write
## true for separated read and write
# default value is true
# since V2.00
disk_rw_separated = true
# disk reader thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_reader_threads = 1
# disk writer thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_writer_threads = 1
# when no entry to sync, try read binlog again after X milliseconds
# must > 0, default value is 200ms
sync_wait_msec=50
# after sync a file, usleep milliseconds
# 0 for sync successively (never call usleep)
sync_interval=0
# storage sync start time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_start_time=00:00
# storage sync end time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_end_time=23:59
# write to the mark file after sync N files
# default value is 500
write_mark_file_freq=500
# path(disk or mount point) count, default value is 1
store_path_count=1
# store_path#, based 0, if store_path0 not exists, it's value is base_path
# the paths must be exist
store_path0=/home/dfs
#store_path1=/home/dfs2
# subdir_count * subdir_count directories will be auto created under each
# store_path (disk), value can be 1 to 256, default value is 256
subdir_count_per_path=256
# tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=172.19.0.30:22122
tracker_server=172.20.0.30:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can ocur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# the mode of the files distributed to the data path
# 0: round robin(default)
# 1: random, distributted by hash code
file_distribute_path_mode=0
# valid when file_distribute_to_path is set to 0 (round robin),
# when the written file count reaches this number, then rotate to next path
# default value is 100
file_distribute_rotate_count=100
# call fsync to disk when write big file
# 0: never call fsync
# other: call fsync when written bytes >= this bytes
# default value is 0 (never call fsync)
fsync_after_written_bytes=0
# sync log buff to disk every interval seconds
# must > 0, default value is 10 seconds
sync_log_buff_interval=10
# sync binlog buff / cache to disk every interval seconds
# default value is 60 seconds
sync_binlog_buff_interval=10
# sync storage stat info to disk every interval seconds
# default value is 300 seconds
sync_stat_file_interval=300
# thread stack size, should >= 512KB
# default value is 512KB
thread_stack_size=512KB
# the priority as a source server for uploading file.
# the lower this value, the higher its uploading priority.
# default value is 10
upload_priority=10
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# default values is empty
if_alias_prefix=
# if check file duplicate, when set to true, use FastDHT to store file indexes
# 1 or yes: need check
# 0 or no: do not check
# default value is 0
check_file_duplicate=0
# file signature method for check file duplicate
## hash: four 32 bits hash code
## md5: MD5 signature
# default value is hash
# since V4.01
file_signature_method=hash
# namespace for storing file indexes (key-value pairs)
# this item must be set when check_file_duplicate is true / on
key_namespace=FastDFS
# set keep_alive to 1 to enable persistent connection with FastDHT servers
# default value is 0 (short connection)
keep_alive=0
# you can use "#include filename" (not include double quotes) directive to
# load FastDHT server list, when the filename is a relative path such as
# pure filename, the base path is the base path of current/this config file.
# must set FastDHT server list when check_file_duplicate is true / on
# please see INSTALL of FastDHT for detail
##include /home/yuqing/fastdht/conf/fdht_servers.conf
# if log to access log
# default value is false
# since V4.00
use_access_log = false
# if rotate the access log every day
# default value is false
# since V4.00
rotate_access_log = false
# rotate access log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.00
access_log_rotate_time=00:00
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate access log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_access_log_size = 0
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if skip the invalid record when sync file
# default value is false
# since V4.02
file_sync_skip_invalid_record=false
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# use the ip address of this storage server if domain_name is empty,
# else this domain name will ocur in the url redirected by the tracker server
http.domain_name=
# the port of the web server on this storage server
http.server_port=8888

278
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/tracker.conf

@ -0,0 +1,278 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# the tracker server port
port=22122
# connect timeout in seconds
# default value is 30s
connect_timeout=10
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store data and log files
base_path=/home/dfs
# max concurrent connections this server supported
# you should set this parameter larger, eg. 102400
max_connections=1024
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# default value is 4
# since V2.00
work_threads=4
# min buff size
# default value 8KB
min_buff_size = 8KB
# max buff size
# default value 128KB
max_buff_size = 128KB
# the method of selecting group to upload files
# 0: round robin
# 1: specify group
# 2: load balance, select the max free space group to upload file
store_lookup=2
# which group to upload file
# when store_lookup set to 1, must set store_group to the group name
store_group=group2
# which storage server to upload file
# 0: round robin (default)
# 1: the first server order by ip address
# 2: the first server order by priority (the minimal)
# Note: if use_trunk_file set to true, must set store_server to 1 or 2
store_server=0
# which path(means disk or mount point) of the storage server to upload file
# 0: round robin
# 2: load balance, select the max free space path to upload file
store_path=0
# which storage server to download file
# 0: round robin (default)
# 1: the source storage server which the current file uploaded to
download_server=0
# reserved storage space for system or other applications.
# if the free(available) space of any stoarge server in
# a group <= reserved_storage_space,
# no file can be uploaded to this group.
# bytes unit can be one of follows:
### G or g for gigabyte(GB)
### M or m for megabyte(MB)
### K or k for kilobyte(KB)
### no unit for byte(B)
### XX.XX% as ratio such as reserved_storage_space = 10%
reserved_storage_space = 1%
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can ocur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# sync log buff to disk every interval seconds
# default value is 10 seconds
sync_log_buff_interval = 10
# check storage server alive interval seconds
check_active_interval = 120
# thread stack size, should >= 64KB
# default value is 64KB
thread_stack_size = 64KB
# auto adjust when the ip address of the storage server changed
# default value is true
storage_ip_changed_auto_adjust = true
# storage sync file max delay seconds
# default value is 86400 seconds (one day)
# since V2.00
storage_sync_file_max_delay = 86400
# the max time of storage sync a file
# default value is 300 seconds
# since V2.00
storage_sync_file_max_time = 300
# if use a trunk file to store several small files
# default value is false
# since V3.00
use_trunk_file = false
# the min slot size, should <= 4KB
# default value is 256 bytes
# since V3.00
slot_min_size = 256
# the max slot size, should > slot_min_size
# store the upload file to trunk file when it's size <= this value
# default value is 16MB
# since V3.00
slot_max_size = 16MB
# the trunk file size, should >= 4MB
# default value is 64MB
# since V3.00
trunk_file_size = 64MB
# if create trunk file advancely
# default value is false
# since V3.06
trunk_create_file_advance = false
# the time base to create trunk file
# the time format: HH:MM
# default value is 02:00
# since V3.06
trunk_create_file_time_base = 02:00
# the interval of create trunk file, unit: second
# default value is 38400 (one day)
# since V3.06
trunk_create_file_interval = 86400
# the threshold to create trunk file
# when the free trunk file size less than the threshold, will create
# the trunk files
# default value is 0
# since V3.06
trunk_create_file_space_threshold = 20G
# if check trunk space occupying when loading trunk free spaces
# the occupied spaces will be ignored
# default value is false
# since V3.09
# NOTICE: set this parameter to true will slow the loading of trunk spaces
# when startup. you should set this parameter to true when neccessary.
trunk_init_check_occupying = false
# if ignore storage_trunk.dat, reload from trunk binlog
# default value is false
# since V3.10
# set to true once for version upgrade when your version less than V3.10
trunk_init_reload_from_binlog = false
# the min interval for compressing the trunk binlog file
# unit: second
# default value is 0, 0 means never compress
# FastDFS compress the trunk binlog when trunk init and trunk destroy
# recommand to set this parameter to 86400 (one day)
# since V5.01
trunk_compress_binlog_min_interval = 0
# if use storage ID instead of IP address
# default value is false
# since V4.00
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# since V4.00
storage_ids_filename = storage_ids.conf
# id type of the storage server in the filename, values are:
## ip: the ip address of the storage server
## id: the server id of the storage server
# this paramter is valid only when use_storage_id set to true
# default value is ip
# since V4.03
id_type_in_filename = ip
# if store slave file use symbol link
# default value is false
# since V4.01
store_slave_file_use_link = false
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# HTTP port on this tracker server
http.server_port=8080
# check storage HTTP server alive interval seconds
# <= 0 for never check
# default value is 30
http.check_alive_interval=30
# check storage HTTP server alive type, values are:
# tcp : connect to the storge server with HTTP port only,
# do not request and get response
# http: storage check alive url must return http status 200
# default value is tcp
http.check_alive_type=tcp
# check storage HTTP server alive uri/url
# NOTE: storage embed HTTP server support uri: /status.html
http.check_alive_uri=/status.html

26
esua-epdc/docker-compose/jinshui/node04/fastdfs/fastdfs.sh

@ -0,0 +1,26 @@
#!/bin/bash
# Container entry script: substitute the placeholder address in the FastDFS
# config files with the runtime address, then start tracker, storage and
# nginx, keeping the container alive in the foreground.

# Runtime address injected by the container environment (docker-compose).
new_val=$FASTDFS_IPADDR
# Placeholder host name baked into the image's config files.
old="com.ikingtech.ch116221"
sed -i "s/$old/$new_val/g" /etc/fdfs/client.conf
sed -i "s/$old/$new_val/g" /etc/fdfs/storage.conf
sed -i "s/$old/$new_val/g" /etc/fdfs/mod_fastdfs.conf
# Snapshot the substituted configs as .txt copies for inspection/debugging.
cat /etc/fdfs/client.conf > /etc/fdfs/client.txt
cat /etc/fdfs/storage.conf > /etc/fdfs/storage.txt
cat /etc/fdfs/mod_fastdfs.conf > /etc/fdfs/mod_fastdfs.txt
# Swap in the FastDFS nginx config, keeping the original as a .t backup.
mv /usr/local/nginx/conf/nginx.conf /usr/local/nginx/conf/nginx.conf.t
cp /etc/fdfs/nginx.conf /usr/local/nginx/conf
echo "start trackerd"
/etc/init.d/fdfs_trackerd start
echo "start storage"
/etc/init.d/fdfs_storaged start
echo "start nginx"
/usr/local/nginx/sbin/nginx
# The daemons fork into the background; keep PID 1 alive so the
# container does not exit.
tail -f /dev/null

47
esua-epdc/docker-compose/jinshui/node04/fastdfs/nginx/docker-compose.yml

@ -0,0 +1,47 @@
# FastDFS stack (tracker + storage + nginx) on the pre-existing external
# network "epdc_network", each container pinned to a fixed IP.
version: '3.7'
services:
  fastdfs-tracker:
    image: season/fastdfs
    restart: always
    volumes:
      # Keep container clock/timezone in sync with the host.
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # Persist tracker metadata on the host.
      - /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data
    command: tracker
    networks:
      epdc_network:
        ipv4_address: 172.19.0.30
  fastdfs-storage:
    image: season/fastdfs
    restart: always
    volumes:
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # Persist storage metadata and the uploaded files.
      - /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data
      - /mnt/epdc/fdfs/store_path:/fastdfs/store_path
    environment:
      # Address of the fastdfs-tracker container above.
      TRACKER_SERVER: 172.19.0.30:22122
    command: storage
    networks:
      epdc_network:
        ipv4_address: 172.19.0.31
  fastdfs-nginx:
    image: season/fastdfs
    restart: always
    volumes:
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # nginx config plus access to the stored files it serves.
      - /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf
      - /mnt/epdc/fdfs/store_path:/fastdfs/store_path
    environment:
      TRACKER_SERVER: 172.19.0.30:22122
    command: nginx
    networks:
      epdc_network:
        ipv4_address: 172.19.0.32
networks:
  epdc_network:
    # Must already exist: docker network create epdc_network
    external: true

47
esua-epdc/docker-compose/jinshui/node04/fastdfs/storage/docker-compose.yml

@ -0,0 +1,47 @@
# FastDFS stack (tracker + storage + nginx) on the pre-existing external
# network "epdc_network", each container pinned to a fixed IP.
version: '3.7'
services:
  fastdfs-tracker:
    image: season/fastdfs
    restart: always
    volumes:
      # Keep container clock/timezone in sync with the host.
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # Persist tracker metadata on the host.
      - /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data
    command: tracker
    networks:
      epdc_network:
        ipv4_address: 172.19.0.30
  fastdfs-storage:
    image: season/fastdfs
    restart: always
    volumes:
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # Persist storage metadata and the uploaded files.
      - /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data
      - /mnt/epdc/fdfs/store_path:/fastdfs/store_path
    environment:
      # Address of the fastdfs-tracker container above.
      TRACKER_SERVER: 172.19.0.30:22122
    command: storage
    networks:
      epdc_network:
        ipv4_address: 172.19.0.31
  fastdfs-nginx:
    image: season/fastdfs
    restart: always
    volumes:
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # nginx config plus access to the stored files it serves.
      - /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf
      - /mnt/epdc/fdfs/store_path:/fastdfs/store_path
    environment:
      TRACKER_SERVER: 172.19.0.30:22122
    command: nginx
    networks:
      epdc_network:
        ipv4_address: 172.19.0.32
networks:
  epdc_network:
    # Must already exist: docker network create epdc_network
    external: true

47
esua-epdc/docker-compose/jinshui/node04/fastdfs/tracker/docker-compose.yml

@ -0,0 +1,47 @@
# FastDFS stack (tracker + storage + nginx) on the pre-existing external
# network "epdc_network", each container pinned to a fixed IP.
version: '3.7'
services:
  fastdfs-tracker:
    image: season/fastdfs
    restart: always
    volumes:
      # Keep container clock/timezone in sync with the host.
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # Persist tracker metadata on the host.
      - /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data
    command: tracker
    networks:
      epdc_network:
        ipv4_address: 172.19.0.30
  fastdfs-storage:
    image: season/fastdfs
    restart: always
    volumes:
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # Persist storage metadata and the uploaded files.
      - /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data
      - /mnt/epdc/fdfs/store_path:/fastdfs/store_path
    environment:
      # Address of the fastdfs-tracker container above.
      TRACKER_SERVER: 172.19.0.30:22122
    command: storage
    networks:
      epdc_network:
        ipv4_address: 172.19.0.31
  fastdfs-nginx:
    image: season/fastdfs
    restart: always
    volumes:
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # nginx config plus access to the stored files it serves.
      - /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf
      - /mnt/epdc/fdfs/store_path:/fastdfs/store_path
    environment:
      TRACKER_SERVER: 172.19.0.30:22122
    command: nginx
    networks:
      epdc_network:
        ipv4_address: 172.19.0.32
networks:
  epdc_network:
    # Must already exist: docker network create epdc_network
    external: true

14
esua-epdc/docker-compose/prod/master/application/10.5.34.162-master/docker-compose.yml

@ -1,14 +0,0 @@
version: '3.7'
services:
web:
image: nginx
ports:
- 443:443
volumes:
- /mnt/nginx/html:/usr/share/nginx/html
- /mnt/nginx/conf/nginx.conf:/etc/nginx/nginx.conf
- /mnt/nginx/conf.d:/etc/nginx/conf.d
- /mnt/nginx/logs:/var/log/nginx
restart: always
container_name: nginx_master

50
esua-epdc/docker-compose/prod/master/picture/10.5.34.166/docker-compose.yml

@ -1,50 +0,0 @@
# Application stack (picture node 10.5.34.166): one container per EPDC
# micro-service image, each publishing its service port on the host.
#
# Fix: six services used the misspelled key "prort" instead of "ports";
# "prort" is not a valid Compose keyword, so those port mappings were
# never applied (Compose rejects or ignores the unknown key).
version: '3.7'
services:
  epdc-events-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-events-server:prod
    ports:
      - "9066:9066"
  epdc-gateway:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-gateway:prod
    ports:
      - "9094:9094"
  epdc-auth:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-auth:prod
    ports:
      - "9056:9056"
  epdc-admin-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-admin-server:prod
    ports:
      - "9055:9055"
  epdc-oss-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-oss-server:prod
    ports:
      - "9065:9065"
  epdc-api-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-api-server:prod
    ports:
      - "9040:9040"
  epdc-news-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-news-server:prod
    ports:
      - "9064:9064"
  epdc-user-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-user-server:prod
    ports:
      - "9068:9068"
  epdc-websocket-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-websocket-server:prod
    ports:
      - "9988:9988"
  epdc-kpi-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-kpi-server:prod
    ports:
      - "9987:9987"
  epdc-group-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-group-server:prod
    ports:
      - "9063:9063"
  epdc-message-server:
    image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-message-server:prod
    ports:
      - "9062:9062"

25
esua-epdc/docker-compose/prod/node01/3-nacos/docker-compose.yml

@ -0,0 +1,25 @@
# Single Nacos node (nacos1) backed by an external MySQL cluster.
version: "3.7"
services:
  nacos1:
    image: nacos/nacos-server:latest
    container_name: nacos1
    ports:
      - 10001:10001
    volumes:
      # Keep container clock/timezone in sync with the host.
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - /mnt/epdc/nacos/logs/nacos1:/home/nacos/logs
      - /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
    environment:
      PREFER_HOST_MODE: ip # use "hostname" only if hostnames resolve; default is ip
      SPRING_DATASOURCE_PLATFORM: mysql # persistence backend: only "mysql", or empty for none
      NACOS_SERVER_PORT: 10001
      NACOS_SERVER_IP: 172.16.0.53 # pin the advertised IP/interface on multi-NIC hosts
      NACOS_SERVERS: 172.16.0.53:10001 172.16.0.51:10001 172.16.0.51:10002 # cluster members [ip1:port ip2:port ...]
      MYSQL_SERVICE_HOST: 172.16.0.52 # MySQL backing store
      MYSQL_SERVICE_PORT: 3306
      MYSQL_SERVICE_DB_NAME: esua_epdc_nacos
      MYSQL_SERVICE_USER: epdc
      MYSQL_SERVICE_PASSWORD: Elink@833066
      MYSQL_DATABASE_NUM: 2
    restart: always

73
esua-epdc/docker-compose/prod/node01/4-seata/conf/registry.conf

@ -0,0 +1,73 @@
# Seata registry & configuration bootstrap (registry.conf).
# Only the section matching "type" is active; the other sections are
# inert templates kept for reference.
registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "nacos"

  # Active: register seata-server with this Nacos instance.
  nacos {
    serverAddr = "47.104.208.104:80"
    namespace = ""
    cluster = "default"
  }
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = "0"
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "nacos"

  # Active: pull seata configuration entries from the same Nacos instance.
  nacos {
    serverAddr = "47.104.208.104:80"
    namespace = ""
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}

39
esua-epdc/docker-compose/prod/node01/4-seata/docker-compose.yml

@ -0,0 +1,39 @@
# Environment variables supported by seata-server:
#
# SEATA_IP
#   Optional. IP that seata-server advertises when registering with the
#   registry (e.g. eureka).
#
# SEATA_PORT
#   Optional. Listen port; default 8091.
#
# STORE_MODE
#   Optional. Transaction-log store: "db" or "file"; default "file".
#
# SERVER_NODE
#   Optional. Node ID of this seata-server instance (1, 2, 3, ...); default 1.
#
# SEATA_ENV
#   Optional. Runtime environment (e.g. dev, test); the server then loads a
#   config file named like registry-dev.conf.
#
# SEATA_CONFIG_NAME
#   Optional. Config location, e.g. file:/root/registry loads
#   /root/registry.conf.
version: "3.7"
services:
  seata-server1:
    container_name: seata-server1
    image: seataio/seata-server:latest
    ports:
      - 9608:9608
    environment:
      SEATA_IP: 47.104.85.99
      SEATA_PORT: 9608
      STORE_MODE: db # transaction log persisted in the database
      SERVER_NODE: 1
      # Loads /root/seata-config/registry.conf (mounted below).
      SEATA_CONFIG_NAME: file:/root/seata-config/registry
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - /opt/epdc/seata/seata-config:/root/seata-config
      - /opt/epdc/seata/logs:/root/logs/seata

84
esua-epdc/docker-compose/prod/node01/4-seata/script/config.txt

@ -0,0 +1,84 @@
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableClientBatchSendRequest=false
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThread-size=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=8
transport.shutdown.wait=3
service.vgroup_mapping.my_test_tx_group=default
service.vgroup_mapping.epdc-api-server-fescar-service-group=default
service.vgroup_mapping.epdc-demo-server-fescar-service-group=default
service.vgroup_mapping.epdc-user-server-fescar-service-group=default
service.vgroup_mapping.epdc-services-server-fescar-service-group=default
service.vgroup_mapping.epdc-party-server-fescar-service-group=default
service.vgroup_mapping.epdc-heart-server-fescar-service-group=default
service.vgroup_mapping.epdc-neighbor-server-fescar-service-group=default
service.vgroup_mapping.epdc-oss-server-fescar-service-group=default
service.vgroup_mapping.epdc-message-server-fescar-service-group=default
service.vgroup_mapping.epdc-news-server-fescar-service-group=default
service.vgroup_mapping.epdc-job-server-fescar-service-group=default
service.vgroup_mapping.epdc-admin-server-fescar-service-group=default
service.vgroup_mapping.epdc-activiti-server-fescar-service-group=default
service.vgroup_mapping.epdc-kpi-server-fescar-service-group=default
service.vgroup_mapping.epdc-points-server-fescar-service-group=default
service.vgroup_mapping.epdc-webservice-server-fescar-service-group=default
service.vgroup_mapping.epdc-events-server-fescar-service-group=default
service.vgroup_mapping.epdc-custom-server-fescar-service-group=default
service.vgroup_mapping.epdc-analysis-server-fescar-service-group=default
service.vgroup_mapping.epdc-group-server-fescar-service-group=default
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.async.commit.buffer.limit=10000
client.rm.lock.retry.internal=10
client.rm.lock.retry.times=30
client.rm.report.retry.count=5
client.rm.lock.retry.policy.branch-rollback-on-conflict=true
client.rm.table.meta.check.enable=false
client.rm.report.success.enable=true
client.tm.commit.retry.count=5
client.tm.rollback.retry.count=5
store.mode=db
store.file.dir=file_store/data
store.file.maxBranchSessionSize=16384
store.file.maxGlobalSessionSize=512
store.file.fileWriteBufferCacheSize=16384
store.file.flushDiskMode=async
store.file.session.reload.read_size=100
store.db.datasource=dbcp
store.db.dbType=mysql
store.db.driverClassName=com.mysql.jdbc.Driver
store.db.url=jdbc:mysql://172.31.171.61:9600/epdc_seata?useUnicode=true
store.db.user=seata
store.db.password=elink888
store.db.minConn=1
store.db.maxConn=3
store.db.global.table=global_table
store.db.branch.table=branch_table
store.db.queryLimit=100
store.db.lockTable=lock_table
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.max.commit.retry.timeout=-1
server.max.rollback.retry.timeout=-1
server.rollback.retry.timeout.unlock.enable=false
client.undo.data.validation=true
client.undo.log.serialization=jackson
server.undo.log.save.days=7
server.undo.log.delete.period=86400000
client.undo.log.table=undo_log
client.log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898

89
esua-epdc/docker-compose/prod/node01/4-seata/script/nacos-config.sh

@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Copyright 1999-2019 Seata.io Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Push every key=value entry from ../config.txt into a Nacos config center.
# Usage: nacos-config.sh [-h host] [-p port] [-g group] [-t tenant]
#
# Fix: all colorized messages now use `echo -e`; under bash, plain `echo`
# does not interpret "\033[...m" escapes and printed them literally.

while getopts ":h:p:g:t:" opt
do
  case $opt in
  h)
    host=$OPTARG
    ;;
  p)
    port=$OPTARG
    ;;
  g)
    group=$OPTARG
    ;;
  t)
    tenant=$OPTARG
    ;;
  ?)
    echo -e "\033[31m USAGE OPTION: $0 [-h host] [-p port] [-g group] [-t tenant] \033[0m"
    exit 1
    ;;
  esac
done

# Defaults for omitted options.
if [[ -z ${host} ]]; then
    host=localhost
fi
if [[ -z ${port} ]]; then
    port=8848
fi
if [[ -z ${group} ]]; then
    group="SEATA_GROUP"
fi
if [[ -z ${tenant} ]]; then
    tenant=""
fi

nacosAddr=$host:$port
contentType="content-type:application/json;charset=UTF-8"

echo "set nacosAddr=$nacosAddr"
echo "set group=$group"

failCount=0
tempLog=$(mktemp -u)

# addConfig CONTENT_TYPE NACOS_ADDR KEY VALUE
# Publishes one config entry; an empty response means the cluster is down.
function addConfig() {
  curl -X POST -H "${1}" "http://$2/nacos/v1/cs/configs?dataId=$3&group=$group&content=$4&tenant=$tenant" >"${tempLog}" 2>/dev/null
  if [[ -z $(cat "${tempLog}") ]]; then
    echo -e "\033[31m Please check the cluster status. \033[0m"
    exit 1
  fi
  if [[ $(cat "${tempLog}") =~ "true" ]]; then
    echo -e "Set $3=$4\033[32m successfully \033[0m"
  else
    echo -e "Set $3=$4\033[31m failure \033[0m"
    (( failCount++ ))
  fi
}

count=0
# NOTE: word-splitting over $(cat ...) works because config.txt entries
# contain no whitespace. Blank/comment tokens are skipped defensively.
for line in $(cat $(dirname "$PWD")/config.txt); do
  [[ -z "$line" || "$line" == \#* ]] && continue
  (( count++ ))
  key=${line%%=*}
  value=${line#*=}
  addConfig "${contentType}" "${nacosAddr}" "${key}" "${value}"
done

echo "========================================================================="
echo -e " Complete initialization parameters, \033[32m total-count:$count \033[0m, \033[31m failure-count:$failCount \033[0m"
echo "========================================================================="

if [[ ${failCount} -eq 0 ]]; then
  echo -e "\033[32m Init nacos config finished, please start seata-server. \033[0m"
else
  echo -e "\033[31m init nacos config fail. \033[0m"
fi

48
esua-epdc/docker-compose/prod/node02/2-nacos/docker-compose.yml

@ -0,0 +1,48 @@
# Two Nacos nodes (nacos2, nacos3) of a three-member cluster, backed by MySQL.
version: "3.7"
services:
  nacos2:
    image: nacos/nacos-server:latest
    container_name: nacos2
    ports:
      # Fix: was "10001:10001" (copied from another environment's file) while
      # this server listens on 9602 (NACOS_SERVER_PORT below); publish 9602.
      - 9602:9602
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - /opt/epdc/nacos/logs/nacos2:/home/nacos/logs
      - /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
    environment:
      PREFER_HOST_MODE: ip # use "hostname" only if hostnames resolve; default is ip
      SPRING_DATASOURCE_PLATFORM: mysql # persistence backend: only "mysql", or empty for none
      NACOS_SERVER_PORT: 9602
      NACOS_SERVER_IP: 172.31.171.61 # pin the advertised IP/interface on multi-NIC hosts
      NACOS_SERVERS: 172.31.171.61:9601 172.31.171.61:9602 172.31.171.62:9601 # cluster members [ip1:port ip2:port ...]
      MYSQL_SERVICE_HOST: 172.31.171.61 # MySQL backing store
      MYSQL_SERVICE_PORT: 9600
      MYSQL_SERVICE_DB_NAME: epdc_nacos
      MYSQL_SERVICE_USER: nacos
      MYSQL_SERVICE_PASSWORD: elink888
      MYSQL_DATABASE_NUM: 2
    restart: always
  nacos3:
    image: nacos/nacos-server:latest
    container_name: nacos3
    ports:
      - 9601:9601
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - /opt/epdc/nacos/logs:/home/nacos/logs
      - /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
    environment:
      PREFER_HOST_MODE: ip
      SPRING_DATASOURCE_PLATFORM: mysql
      NACOS_SERVER_PORT: 9601
      # NOTE(review): advertises .62 while nacos2 advertises .61, although both
      # are defined in the same compose file — confirm the intended host.
      NACOS_SERVER_IP: 172.31.171.62
      NACOS_SERVERS: 172.31.171.61:9601 172.31.171.61:9602 172.31.171.62:9601
      MYSQL_SERVICE_HOST: 172.31.171.61
      MYSQL_SERVICE_PORT: 9600
      MYSQL_SERVICE_DB_NAME: epdc_nacos
      MYSQL_SERVICE_USER: nacos
      MYSQL_SERVICE_PASSWORD: elink888
      MYSQL_DATABASE_NUM: 2
    restart: always

73
esua-epdc/docker-compose/prod/node02/5-seata/conf/registry.conf

@ -0,0 +1,73 @@
# Seata registry & configuration bootstrap (registry.conf).
# Only the section matching "type" is active; the other sections are
# inert templates kept for reference.
registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "nacos"

  # Active: register seata-server with this Nacos instance.
  nacos {
    serverAddr = "172.31.171.61:80"
    namespace = ""
    cluster = "default"
  }
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = "0"
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "nacos"

  # Active: pull seata configuration entries from the same Nacos instance.
  nacos {
    serverAddr = "172.31.171.61:80"
    namespace = ""
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}

21
esua-epdc/docker-compose/prod/master/application/10.5.34.163-slave/docker-compose.yml → esua-epdc/docker-compose/prod/node02/5-seata/docker-compose.yml

@ -21,15 +21,18 @@
version: "3.7"
services:
seata-server:
seata-server2:
container_name: seata-server2
image: seataio/seata-server:latest
hostname: seata-server
ports:
- 9001:8091
- 9608:9608
environment:
- SEATA_PORT=8091
- SEATA_IP=
- STORE_MODE=db
- SERVER_NODE=1
expose:
- 8091
SEATA_IP: 114.215.125.123
SEATA_PORT: 9608
STORE_MODE: db
SERVER_NODE: 2
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/seata/seata-config:/root/seata-config

39
esua-epdc/docker-compose/test/node01/1-mysql/conf/mysql.conf.cnf

@ -18,28 +18,53 @@ skip-host-cache
skip-name-resolve
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分
server-id = 21
server-id = 2
# mysql日志
log_bin = /var/lib/mysql/mysql-bin.log
log-bin-index=slave-relay-bin.index
log-bin-index=/var/lib/mysql/slave-relay-bin.index
#日志记录的格式
binlog_format=MIXED
binlog_format=mixed
#单个日志文件最大
max_binlog_size = 100M 
max_binlog_size=100M 
#从库建议开启,有利于数据一致性
relay_log_recovery = 1   
relay_log_recovery=1   
#如果从库还会用做主库,建议开启
log_slave_updates = 1   
log_slave_updates=1   
# 中继日志:存储所有主库TP过来的binlog事件主库binlog:记录主库发生过的修改事件
# relay-log = /var/lib/mysql/mysql-relay-bin.log
relay-log=/var/lib/mysql/mysql-relay-bin.log
relay-log-index=relay-log.index
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
# 慢查询
slow_query_log=on
long_query_time=3
slow_query_log_file=/var/lib/mysql/mysql-slow.log
# GTID
gtid_mode=on
enforce_gtid_consistency=on
# 此参数设置为ON时,新创建的InnoDB 数据表的数据会存储于一个单独的文件中,而不是存储于共享的ibdata表空间。
innodb-file-per-table=1
innodb_flush_log_at_trx_commit=2
log_warnings=1
# 只读配置
read_only=1
[mysqld_safe]
log-error=/var/lib/mysql/mysqld.log
pid-file=/var/lib/mysql/mysqld.pid
INSERT INTO esua_epdc_events.epdc_events VALUES ('78e89fe1d35170ed3b159a5abdf6eaf0','a897d6b6d8b97bfaaafbb031d9a3d7a2','郭口路3号-马女士','https://wx.qlogo.cn/mmopen/vi_32/q5bEiaAeUgBDj5VbESXLicsic9CSpOeqCGCQLBWXic4lLCf5ayqib5e3DRAeNBc7zCzRMxFyuE5ZhwbyYtuJyTHQZdg/132','1','13156396355','春江水暖鸭先知','青啤社区第四网格',1222016013611073538,'山东省青岛市市北区丹山路88号',36.0762400000,120.3402690000,4,'生机勃勃',0,0,0,0,'0',0,'APP_USER','2020-04-09 15:41:56','1222040755663568897','2020-04-09 18:35:45','0','','','','1175270520603930625,1202809196967714817,1202809398139117570','市北区党委-登州路街道-青啤社区','13','13','其它','1175270520603930625,1202809196967714817,1202809398139117570,1222016013611073538','市北区党委-登州路街道-青啤社区-青啤社区第四网格');

1
esua-epdc/docker-compose/test/node01/2-nginx/docker-compose.yml

@ -7,6 +7,7 @@ services:
- 443:443
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nginx/html:/usr/share/nginx/html
- /opt/epdc/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/epdc/nginx/conf.d:/etc/nginx/conf.d:ro

1
esua-epdc/docker-compose/test/node01/3-nacos/docker-compose.yml

@ -7,6 +7,7 @@ services:
- 9601:9601
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nacos/logs:/home/nacos/logs
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:

3
esua-epdc/docker-compose/test/node01/4-redis/docker-compose.yml

@ -9,6 +9,7 @@ services:
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-slave01.conf:/usr/local/etc/redis/redis.conf
@ -22,6 +23,7 @@ services:
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-slave02.conf:/usr/local/etc/redis/redis.conf
@ -35,6 +37,7 @@ services:
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-slave03.conf:/usr/local/etc/redis/redis.conf

1
esua-epdc/docker-compose/test/node01/5-seata/docker-compose.yml

@ -34,4 +34,5 @@ services:
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/seata/seata-config:/root/seata-config

35
esua-epdc/docker-compose/test/node02/1-mysql/conf/mysql.conf.cnf

@ -18,19 +18,44 @@ skip-host-cache
skip-name-resolve
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分
server-id = 11
server-id=1
#如果从库还会用做主库,建议开启
log_slave_updates=1
# mysql日志
log_bin = /var/lib/mysql/master-bin.log
log-bin-index=master-bin.index
log_bin=/var/lib/mysql/master-bin.log
log-bin-index=/var/lib/mysql/master-bin.index
# binlog日志格式,mysql默认采用statement,建议使用 mixed(是statement和row模式的结合)
binlog_format = mixed
binlog_format=mixed
# 中继日志:存储所有主库TP过来的binlog事件主库binlog:记录主库发生过的修改事件
relay-log=/var/lib/mysql/relay-log
relay-log-index=/var/lib/mysql/relay-log.index
#单个日志文件最大
max_binlog_size = 100M
max_binlog_size=100M
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
# 慢查询
slow_query_log=on
long_query_time=1
slow_query_log_file=/data/mysql/mysql-slow.log
# GTID
gtid_mode=on
enforce_gtid_consistency=on
# 此参数设置为ON时,新创建的InnoDB 数据表的数据会存储于一个单独的文件中,而不是存储于共享的ibdata表空间。
innodb-file-per-table=1
innodb_flush_log_at_trx_commit=2
log_warnings=1
[mysqld_safe]
log-error=/var/lib/mysql/mysqld.log
pid-file=/var/lib/mysql/mysqld.pid

3
esua-epdc/docker-compose/test/node02/1-mysql/docker-compose.yml

@ -15,3 +15,6 @@ services:
- /opt/epdc/mysql/data:/var/lib/mysql
- /opt/epdc/mysql/conf/mysql.conf.cnf:/etc/mysql/conf.d/mysql.conf.cnf
restart: always
mysql -e "show databases;" -uroot -p| grep -Ev "Database|information_schema|mysql|performance_schema" |xargs mysqldump -uroot -p --single-transaction --master-data=2 --no-autocommit --databases --default-character-set=utf8 > mysql_dump.sql

4
esua-epdc/docker-compose/test/node02/2-nacos/docker-compose.yml

@ -6,6 +6,8 @@ services:
ports:
- 9601:9601
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nacos/logs/nacos1:/home/nacos/logs
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
@ -28,6 +30,8 @@ services:
ports:
- 9602:9602
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nacos/logs/nacos2:/home/nacos/logs
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:

2
esua-epdc/docker-compose/test/node02/3-seata/docker-compose.yml

@ -33,5 +33,7 @@ services:
SERVER_NODE: 1
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/seata/seata-config:/root/seata-config
- /opt/epdc/seata/logs:/root/logs/seata

6
esua-epdc/docker-compose/test/node02/4-redis/docker-compose.yml

@ -8,6 +8,8 @@ services:
command: redis-server /usr/local/etc/redis/redis.conf
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-master01.conf:/usr/local/etc/redis/redis.conf
@ -20,6 +22,8 @@ services:
command: redis-server /usr/local/etc/redis/redis.conf
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-master02.conf:/usr/local/etc/redis/redis.conf
@ -32,6 +36,8 @@ services:
command: redis-server /usr/local/etc/redis/redis.conf
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-master03.conf:/usr/local/etc/redis/redis.conf

4
esua-epdc/epdc-admin/epdc-admin-server/Dockerfile

@ -1,5 +1,5 @@
# 基础镜像
FROM openjdk:8u242-jdk-buster
FROM openjdk:8
# 作者
MAINTAINER rongchao@elink-cn.com
# 对应pom.xml文件中的dockerfile-maven-plugin插件JAR_FILE的值
@ -16,5 +16,5 @@ ENV DATAPATH /data
# 挂载/data目录到主机
VOLUME $DATAPATH
# 启动容器时执行
ENTRYPOINT java -jar -Xmx1024m $JAR_PATH
ENTRYPOINT java -jar $JAR_CONFIG $JAR_PATH
EXPOSE ${SERVER_PORT}

16
esua-epdc/epdc-admin/epdc-admin-server/pom.xml

@ -153,8 +153,8 @@
<work.wx.ma.appId>wx29b074840ef4bfd9</work.wx.ma.appId>
<work.wx.ma.secret>4adb1afccc69f205cdf5b521d74e2aca</work.wx.ma.secret>
<!--数据分析端小程序-->
<analysis.wx.ma.appId>wx9b6102a8ee5add65</analysis.wx.ma.appId>
<analysis.wx.ma.secret>394f47d4e08fc0fd629231d3f68a34dc</analysis.wx.ma.secret>
<analysis.wx.ma.appId>wxfa4afaa2b5f9c876</analysis.wx.ma.appId>
<analysis.wx.ma.secret>7db9f049c78c9a6cafa673deebe8330d</analysis.wx.ma.secret>
<!--RocketMQ-->
<rocketmq.name.server>47.104.85.99:9876;114.215.125.123:9876</rocketmq.name.server>
<rocketmq.producer.group>organizationGroup</rocketmq.producer.group>
@ -171,8 +171,8 @@
<!-- nacos -->
<nacos.register-enabled>true</nacos.register-enabled>
<nacos.server-addr>47.104.224.45:8848</nacos.server-addr>
<nacos.ip>219.146.91.110</nacos.ip>
<nacos.namespace>a746dde3-7a13-4521-b986-7369b0b7c269</nacos.namespace>
<nacos.ip>47.104.85.99</nacos.ip>
<nacos.namespace>6a3577b4-7b79-43f6-aebb-9c3f31263f6a</nacos.namespace>
<spring.zipkin.base-url>http://localhost:9411</spring.zipkin.base-url>
<!--亿联小程序配置-->
@ -186,8 +186,8 @@
<work.wx.ma.appId>wx9f20a46906ab2c3e</work.wx.ma.appId>
<work.wx.ma.secret>dc13065f79429979d9f687d249eb5c4e</work.wx.ma.secret>
<!--数据分析端小程序-->
<analysis.wx.ma.appId>wx9b6102a8ee5add65</analysis.wx.ma.appId>
<analysis.wx.ma.secret>394f47d4e08fc0fd629231d3f68a34dc</analysis.wx.ma.secret>
<analysis.wx.ma.appId>wxfa4afaa2b5f9c876</analysis.wx.ma.appId>
<analysis.wx.ma.secret>7db9f049c78c9a6cafa673deebe8330d</analysis.wx.ma.secret>
<!--RocketMQ-->
<rocketmq.name.server>47.104.85.99:9876;114.215.125.123:9876</rocketmq.name.server>
<rocketmq.producer.group>organizationGroup</rocketmq.producer.group>
@ -225,8 +225,8 @@
<work.wx.ma.appId>wx9f20a46906ab2c3e</work.wx.ma.appId>
<work.wx.ma.secret>dc13065f79429979d9f687d249eb5c4e</work.wx.ma.secret>
<!--数据分析端小程序-->
<analysis.wx.ma.appId>wx9b6102a8ee5add65</analysis.wx.ma.appId>
<analysis.wx.ma.secret>394f47d4e08fc0fd629231d3f68a34dc</analysis.wx.ma.secret>
<analysis.wx.ma.appId>wxfa4afaa2b5f9c876</analysis.wx.ma.appId>
<analysis.wx.ma.secret>7db9f049c78c9a6cafa673deebe8330d</analysis.wx.ma.secret>
<spring.datasource.druid.url>
<![CDATA[jdbc:mysql://172.16.0.52:3306/esua_epdc_admin?allowMultiQueries=true&useUnicode=true&characterEncoding=UTF-8&useSSL=false&serverTimezone=Asia/Shanghai]]>

2
esua-epdc/epdc-admin/epdc-admin-server/src/main/resources/application-test.yml

@ -33,4 +33,4 @@ spring:
stat:
log-slow-sql: true
slow-sql-millis: 1000
merge-sql: false
merge-sql: false

4
esua-epdc/epdc-auth/Dockerfile

@ -1,5 +1,5 @@
# 基础镜像
FROM openjdk:8u242-jdk-buster
FROM openjdk:8
# 作者
MAINTAINER rongchao@elink-cn.com
# 对应pom.xml文件中的dockerfile-maven-plugin插件JAR_FILE的值
@ -16,5 +16,5 @@ ENV DATAPATH /data
# 挂载/data目录到主机
VOLUME $DATAPATH
# 启动容器时执行
ENTRYPOINT java -jar -Xmx1024m $JAR_PATH
ENTRYPOINT java -jar $JAR_CONFIG $JAR_PATH
EXPOSE ${SERVER_PORT}

6
esua-epdc/epdc-auth/pom.xml

@ -132,9 +132,9 @@
<!-- redis配置 -->
<spring.redis.index>2</spring.redis.index>
<spring.redis.host>47.104.224.45</spring.redis.host>
<spring.redis.port>6379</spring.redis.port>
<spring.redis.password>elink@888</spring.redis.password>
<spring.redis.host>114.215.125.123</spring.redis.host>
<spring.redis.port>9603</spring.redis.port>
<spring.redis.password>epdc!redis@master1405</spring.redis.password>
<!-- nacos -->
<nacos.register-enabled>true</nacos.register-enabled>

10
esua-epdc/epdc-auth/src/main/resources/logback-spring.xml

@ -144,10 +144,10 @@
<!-- 生产环境 -->
<springProfile name="prod">
<logger name="org.springframework.web" level="ERROR"/>
<logger name="org.springboot.sample" level="ERROR"/>
<logger name="com.elink.esua.epdc" level="ERROR"/>
<root level="ERROR">
<logger name="org.springframework.web" level="INFO"/>
<logger name="org.springboot.sample" level="INFO"/>
<logger name="com.elink.esua.epdc" level="INFO"/>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="DEBUG_FILE"/>
<appender-ref ref="INFO_FILE"/>
@ -156,4 +156,4 @@
</root>
</springProfile>
</configuration>
</configuration>

6
esua-epdc/epdc-commons/epdc-common-clienttoken/pom.xml

@ -23,6 +23,12 @@
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.esua.epdc</groupId>
<artifactId>epdc-user-client</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>

12
esua-epdc/epdc-commons/epdc-common-clienttoken/src/main/java/com/elink/esua/epdc/common/token/dto/TokenDto.java

@ -1,8 +1,11 @@
package com.elink.esua.epdc.common.token.dto;
import com.elink.esua.epdc.dto.UserTagInfo;
import lombok.Data;
import java.io.Serializable;
import java.util.Date;
import java.util.List;
/**
* 用户token
@ -46,7 +49,12 @@ public class TokenDto implements Serializable {
private Long gridId;
/**
* 党员标识 01
* 性别(女性-0男性-1)
*/
private String partyFlag;
private String sex;
/**
* 用户标签信息列表
*/
private List<UserTagInfo> userTagInfos;
}

20
esua-epdc/epdc-commons/epdc-common-clienttoken/src/main/java/com/elink/esua/epdc/common/token/util/CpUserDetailRedis.java

@ -40,6 +40,26 @@ public class CpUserDetailRedis {
redisUtils.hMSet(key, map, expire);
}
/**
 * Cache a user's token info in redis. Differs from
 * {@link CpUserDetailRedis#set(TokenDto, long)} in that the suffix used to
 * build the redis key can be customized by the caller.
 *
 * @param user           user info to cache; method is a no-op when null
 * @param expire         time-to-live for the cached hash
 * @param redisKeySuffix suffix appended when building the redis key
 * @author work@yujt.net.cn
 * @date 2020/1/31 15:13
 */
public void set(TokenDto user, long expire, String redisKeySuffix) {
if (user == null) {
return;
}
String key = RedisKeys.getCpUserKey(redisKeySuffix);
// Flatten the bean into a field map so it can be stored as a redis hash.
// NOTE(review): flags look like hutool's (isToUnderlineCase=false, ignoreNullValue=true) - confirm.
Map<String, Object> map = BeanUtil.beanToMap(user, false, true);
redisUtils.hMSet(key, map, expire);
}
/**
* 获取token信息
*

36
esua-epdc/epdc-commons/epdc-commons-mybatis/src/main/java/com/elink/esua/epdc/commons/mybatis/entity/DeptScope.java

@ -0,0 +1,36 @@
package com.elink.esua.epdc.commons.mybatis.entity;
import lombok.Data;
import java.io.Serializable;
/**
 * Base entity carrying the redundant (denormalized) department fields that
 * concrete entities inherit.
 *
 * @author rongchao
 * @Date 19-12-18
 */
@Data
public abstract class DeptScope extends BaseEpdcEntity implements Serializable {

    // Explicit serialVersionUID: the class is Serializable, so pin the version
    // instead of relying on the compiler-generated default.
    private static final long serialVersionUID = 1L;

    /**
     * Names of all departments (full chain).
     */
    private String allDeptNames;

    /**
     * IDs of all departments (full chain).
     */
    private String allDeptIds;

    /**
     * Names of all parent departments.
     */
    private String parentDeptNames;

    /**
     * IDs of all parent departments.
     */
    private String parentDeptIds;
}

74
esua-epdc/epdc-commons/epdc-commons-mybatis/src/main/java/com/elink/esua/epdc/commons/mybatis/utils/DeptEntityUtils.java

@ -0,0 +1,74 @@
package com.elink.esua.epdc.commons.mybatis.utils;
import com.elink.esua.epdc.commons.mybatis.entity.DeptScope;
import lombok.Data;
/**
 * Utility for copying the redundant department fields onto entities.
 *
 * @author rongchao
 * @Date 19-12-18
 */
public class DeptEntityUtils {

    private DeptEntityUtils() {
        // utility class - no instances
    }

    /**
     * Carrier for the four redundant department fields.
     */
    @Data
    public static class DeptDto {
        /**
         * IDs of all parent departments
         */
        private String parentDeptIds;
        /**
         * Names of all parent departments
         */
        private String parentDeptNames;
        /**
         * IDs of all departments
         */
        private String allDeptIds;
        /**
         * Names of all departments
         */
        private String allDeptNames;
    }

    /**
     * Instantiate {@code entityClass} reflectively and copy the department
     * fields from {@code dto} onto it.
     *
     * @param dto         source of the department fields
     * @param entityClass entity type; must have an accessible no-arg constructor
     * @return the populated entity, or {@code null} when instantiation fails
     * @author rongchao
     * @since 2019-12-18
     */
    public static <T extends DeptScope> T loadDeptInfo(DeptDto dto, Class<T> entityClass) {
        try {
            // getDeclaredConstructor().newInstance() replaces the deprecated
            // Class.newInstance(), which rethrows checked exceptions unchecked.
            T t = entityClass.getDeclaredConstructor().newInstance();
            loadDeptInfo(dto, t);
            return t;
        } catch (ReflectiveOperationException e) {
            // preserved original best-effort behavior: report and return null
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Copy the department fields from {@code dto} onto an existing entity.
     *
     * @param dto    source of the department fields
     * @param entity target entity, mutated in place
     * @author rongchao
     * @since 2019-12-18
     */
    public static <T extends DeptScope> void loadDeptInfo(DeptDto dto, T entity) {
        entity.setAllDeptIds(dto.getAllDeptIds());
        entity.setAllDeptNames(dto.getAllDeptNames());
        entity.setParentDeptIds(dto.getParentDeptIds());
        entity.setParentDeptNames(dto.getParentDeptNames());
    }
}

2
esua-epdc/epdc-commons/epdc-commons-tools-wx-ma/pom.xml

@ -31,4 +31,4 @@
</dependency>
</dependencies>
</project>
</project>

12
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/NacosConfigConstant.java

@ -0,0 +1,12 @@
package com.elink.esua.epdc.commons.tools.constant;
/**
 * Constants for the Nacos configuration center.
 *
 * @author rongchao
 * @Date 20-1-15
 */
public interface NacosConfigConstant {
    /**
     * Group name used for this project's configuration entries in Nacos.
     */
    String CONFIG_GROUP = "EPDC_CONFIG_GROUP";
}

1
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/NumConstant.java

@ -29,6 +29,7 @@ public interface NumConstant {
int THIRTY = 30;
int FORTY = 40;
int FIFTY = 50;
int SIXTY = 60;
int ONE_HUNDRED = 100;
long ZERO_L = 0L;

13
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/PointsConstant.java

@ -0,0 +1,13 @@
package com.elink.esua.epdc.commons.tools.constant;
/**
 * Constants used by the points (rewards) module.
 *
 * @Auther: yinzuomei
 * @Date: 2019/12/16 19:38
 * @Description: points-related constants
 */
public interface PointsConstant {
    /**
     * Rule code for a manual points adjustment.
     * NOTE(review): name violates the UPPER_SNAKE_CASE constant convention,
     * but renaming would break existing callers - leave as-is.
     */
    String ruleCode ="hand_regulation";
}

5
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/StrConstant.java

@ -38,4 +38,9 @@ public interface StrConstant {
* 冒号
*/
String COLON = ":";
/**
* 问号
*/
String QUESTION_MARK = "?";
}

35
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/UserAuthTypeEnum.java

@ -0,0 +1,35 @@
package com.elink.esua.epdc.commons.tools.enums;
/**
 * User certification (authentication) categories.
 *
 * @author rongchao
 * @Date 19-12-19
 */
public enum UserAuthTypeEnum {

    /**
     * 0 - resident certification
     */
    RESIDENT_AUTH("0"),
    /**
     * 1 - party-member certification
     */
    PARTY_AUTH("1"),
    /**
     * 2 - volunteer certification
     */
    VOLUNTEER_AUTH("2");

    // final: enum instances are shared JVM-wide, so the code must be immutable
    private final String value;

    UserAuthTypeEnum(String value) {
        this.value = value;
    }

    /**
     * @return the persisted code of this certification type
     */
    public String value() {
        return this.value;
    }
}

28
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/UserTagEnum.java

@ -0,0 +1,28 @@
package com.elink.esua.epdc.commons.tools.enums;
/**
 * User identity tags.
 *
 * @Author: yinzuomei
 * @Date: 2019/12/17 19:11
 */
public enum UserTagEnum {

    /**
     * Party member
     */
    PARTY_MEMBER("partymember"),

    /**
     * Volunteer
     */
    VOLUNTEER("volunteer");

    private String value;

    UserTagEnum(String tagCode) {
        this.value = tagCode;
    }

    /**
     * @return the persisted tag code
     */
    public String value() {
        return this.value;
    }
}

40
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsBehaviorCodeEnum.java

@ -0,0 +1,40 @@
package com.elink.esua.epdc.commons.tools.enums.pointsenum;
/**
 * Points behavior codes: one entry per user action that earns or deducts points.
 *
 * @Auther: yinzuomei
 * @Date: 2020/2/6 14:01
 */
public enum PointsBehaviorCodeEnum {

    LIKE("like", "赞"),
    DISLIKE("dislike", "踩"),
    SHARE("share", "分享"),
    CLOCK("clock", "打卡"),
    COMMENT("comment", "评论"),
    BREAK_PROMISE("break_promise", "爽约"),
    JOIN_ACT("join_act", "活动积分");

    /**
     * Behavior code matched against points rules.
     */
    private String behaviorCode;
    /**
     * Display name (Chinese UI label).
     */
    private String name;

    PointsBehaviorCodeEnum(String behaviorCode, String name) {
        this.behaviorCode = behaviorCode;
        this.name = name;
    }

    public String getBehaviorCode() {
        return behaviorCode;
    }

    /**
     * @deprecated enum instances are globally shared; mutating the code changes
     * it for the whole JVM. Kept only for backward compatibility.
     */
    @Deprecated
    public void setBehaviorCode(String behaviorCode) {
        this.behaviorCode = behaviorCode;
    }

    public String getName() {
        return name;
    }

    /**
     * @deprecated see {@link #setBehaviorCode(String)} - same global-mutation hazard.
     */
    @Deprecated
    public void setName(String name) {
        this.name = name;
    }
}

27
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsLimitTimeEnum.java

@ -0,0 +1,27 @@
package com.elink.esua.epdc.commons.tools.enums.pointsenum;
/**
 * Time-window units for points-rule limits.
 *
 * @Auther: yinzuomei
 * @Date: 2019/12/13 09:43
 */
public enum PointsLimitTimeEnum {

    /**
     * 0 - minute
     */
    LIMIT_TIME_MINUTE("0"),
    /**
     * 1 - hour
     */
    LIMIT_TIME_HOUR("1"),
    /**
     * 2 - day
     */
    LIMIT_TIME_DAY("2"),
    /**
     * 3 - month
     */
    LIMIT_TIME_MONTH("3"),
    /**
     * 4 - year
     */
    LIMIT_TIME_YEAR("4");

    private String value;

    PointsLimitTimeEnum(String unitCode) {
        this.value = unitCode;
    }

    public String value() {
        return this.value;
    }
}

24
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsOperationEnum.java

@ -0,0 +1,24 @@
package com.elink.esua.epdc.commons.tools.enums.pointsenum;
/**
 * Points operation types (0 - subtract points, 1 - add points).
 *
 * @Auther: yinzuomei
 * @Date: 2019/12/13 09:31
 */
public enum PointsOperationEnum {

    /**
     * 1 - add points
     */
    OPERATION_TYPE_ADD("1"),
    /**
     * 0 - subtract points
     */
    OPERATION_TYPE_SUBSTRACT("0");

    // final: enum state must be immutable (instances are shared JVM-wide)
    private final String operationType;

    PointsOperationEnum(String operationType) {
        this.operationType = operationType;
    }

    /**
     * @return the persisted operation-type code
     */
    public String getOperationType() {
        return operationType;
    }
}

34
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsOperationModeEnum.java

@ -0,0 +1,34 @@
package com.elink.esua.epdc.commons.tools.enums.pointsenum;
/**
 * Who performed a points operation.
 *
 * @Auther: yinzuomei
 * @Date: 2019/12/13 09:33
 */
public enum PointsOperationModeEnum {

    /**
     * user - performed by an end user
     */
    OPERATION_MODE_USER("user"),
    /**
     * admin - performed by an administrator
     */
    OPERATION_MODE_ADMIN("admin"),
    /**
     * sys - performed by the system
     */
    OPERATION_MODE_SYS("sys");

    private String operationMode;

    PointsOperationModeEnum(String mode) {
        this.operationMode = mode;
    }

    public String getOperationMode() {
        return this.operationMode;
    }
}

26
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsRuleAvailableEnum.java

@ -0,0 +1,26 @@
package com.elink.esua.epdc.commons.tools.enums.pointsenum;
/**
 * Availability flag of a points rule (0 - unavailable, 1 - available).
 *
 * @Auther: yinzuomei
 * @Date: 2019/12/12 15:04
 */
public enum PointsRuleAvailableEnum {

    /**
     * 1 - available
     */
    AVAILABLE_TRUE("1"),
    /**
     * 0 - unavailable
     */
    AVAILABLE_FALSE("0");

    private String value;

    PointsRuleAvailableEnum(String flag) {
        this.value = flag;
    }

    public String value() {
        return this.value;
    }
}

36
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsRuleCodeEnum.java

@ -0,0 +1,36 @@
package com.elink.esua.epdc.commons.tools.enums.pointsenum;
/**
 * Points rule codes.
 *
 * @Auther: yinzuomei
 * @Date: 2020/2/6 14:00
 */
public enum PointsRuleCodeEnum {

    CANCEL_ACT("cancel_act", "取消报名系统扣减积分"),
    CONFIRM_JOIN_ACT("confirm_join_act", "参与活动确认积分");

    /**
     * Rule code matched against configured points rules.
     */
    private String ruleCode;
    /**
     * Display name (Chinese UI label).
     */
    private String name;

    PointsRuleCodeEnum(String ruleCode, String name) {
        this.ruleCode = ruleCode;
        this.name = name;
    }

    public String getRuleCode() {
        return ruleCode;
    }

    /**
     * @deprecated enum instances are globally shared; mutating the code changes
     * it for the whole JVM. Kept only for backward compatibility.
     */
    @Deprecated
    public void setRuleCode(String ruleCode) {
        this.ruleCode = ruleCode;
    }

    public String getName() {
        return name;
    }

    /**
     * @deprecated see {@link #setRuleCode(String)} - same global-mutation hazard.
     */
    @Deprecated
    public void setName(String name) {
        this.name = name;
    }
}

28
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/enums/pointsenum/PointsUpperLimitEnum.java

@ -0,0 +1,28 @@
package com.elink.esua.epdc.commons.tools.enums.pointsenum;
/**
 * Whether a points rule carries an upper limit.
 *
 * @Auther: yinzuomei
 * @Date: 2019/12/13 09:48
 */
public enum PointsUpperLimitEnum {

    /**
     * 1 - has an upper limit
     */
    YES("1"),
    /**
     * 0 - no upper limit
     */
    NO("0");

    private String value;

    PointsUpperLimitEnum(String flag) {
        this.value = flag;
    }

    public String value() {
        return this.value;
    }
}

271
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/utils/GPSUtils.java

@ -1,146 +1,155 @@
package com.elink.esua.epdc.commons.tools.utils;
/**
 * Coordinate conversions between the datums used by Chinese map services:
 * WGS-84 (raw GPS), GCJ-02 ("Mars" datum) and BD-09 (Baidu).
 *
 * NOTE(review): the previous revision contained every method twice (merge/diff
 * residue) and would not compile; deduplicated here with behavior unchanged.
 *
 * @Author yinzuomei
 * @Date 2019/12/27 10:00
 */
public class GPSUtils {

    // Constants made final: they are mathematical/ellipsoid constants and no
    // visible code reassigns them.
    /** PI as used by the GCJ-02 offset formulas. */
    public static final double pi = 3.1415926535897932384626;
    /** PI * 3000 / 180, used by the BD-09 &lt;-&gt; GCJ-02 formulas. */
    public static final double x_pi = 3.14159265358979324 * 3000.0 / 180.0;
    /** Krasovsky 1940 ellipsoid semi-major axis (meters). */
    public static final double a = 6378245.0;
    /** Krasovsky 1940 ellipsoid first eccentricity squared. */
    public static final double ee = 0.00669342162296594323;

    /**
     * Latitude offset polynomial of the GCJ-02 obfuscation.
     *
     * @param x longitude - 105.0
     * @param y latitude - 35.0
     * @return raw latitude offset before ellipsoid scaling
     */
    public static double transformLat(double x, double y) {
        double ret = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y
                + 0.2 * Math.sqrt(Math.abs(x));
        ret += (20.0 * Math.sin(6.0 * x * pi) + 20.0 * Math.sin(2.0 * x * pi)) * 2.0 / 3.0;
        ret += (20.0 * Math.sin(y * pi) + 40.0 * Math.sin(y / 3.0 * pi)) * 2.0 / 3.0;
        ret += (160.0 * Math.sin(y / 12.0 * pi) + 320 * Math.sin(y * pi / 30.0)) * 2.0 / 3.0;
        return ret;
    }

    /**
     * Longitude offset polynomial of the GCJ-02 obfuscation.
     *
     * @param x longitude - 105.0
     * @param y latitude - 35.0
     * @return raw longitude offset before ellipsoid scaling
     */
    public static double transformLon(double x, double y) {
        double ret = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1
                * Math.sqrt(Math.abs(x));
        ret += (20.0 * Math.sin(6.0 * x * pi) + 20.0 * Math.sin(2.0 * x * pi)) * 2.0 / 3.0;
        ret += (20.0 * Math.sin(x * pi) + 40.0 * Math.sin(x / 3.0 * pi)) * 2.0 / 3.0;
        ret += (150.0 * Math.sin(x / 12.0 * pi) + 300.0 * Math.sin(x / 30.0
                * pi)) * 2.0 / 3.0;
        return ret;
    }

    /**
     * Apply the WGS-84 -&gt; GCJ-02 offset. Points outside China are returned
     * unchanged (GCJ-02 only distorts coordinates inside China).
     *
     * @return {latitude, longitude}
     */
    public static double[] transform(double lat, double lon) {
        if (outOfChina(lat, lon)) {
            return new double[]{lat, lon};
        }
        double dLat = transformLat(lon - 105.0, lat - 35.0);
        double dLon = transformLon(lon - 105.0, lat - 35.0);
        double radLat = lat / 180.0 * pi;
        double magic = Math.sin(radLat);
        magic = 1 - ee * magic * magic;
        double sqrtMagic = Math.sqrt(magic);
        dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi);
        dLon = (dLon * 180.0) / (a / sqrtMagic * Math.cos(radLat) * pi);
        double mgLat = lat + dLat;
        double mgLon = lon + dLon;
        return new double[]{mgLat, mgLon};
    }

    /**
     * Rough bounding-box test for "outside mainland China".
     */
    public static boolean outOfChina(double lat, double lon) {
        if (lon < 72.004 || lon > 137.8347) {
            return true;
        }
        return lat < 0.8293 || lat > 55.8271;
    }

    /**
     * WGS-84 -&gt; GCJ-02 (World Geodetic System to Mars Geodetic System).
     *
     * @param lat WGS-84 latitude
     * @param lon WGS-84 longitude
     * @return {latitude, longitude} in GCJ-02
     */
    public static double[] gps84_To_Gcj02(double lat, double lon) {
        // body was identical to transform(); delegate instead of duplicating
        return transform(lat, lon);
    }

    /**
     * GCJ-02 -&gt; WGS-84 (approximate inverse of the forward offset).
     *
     * @param lat GCJ-02 latitude
     * @param lon GCJ-02 longitude
     * @return {latitude, longitude} in WGS-84
     */
    public static double[] gcj02_To_Gps84(double lat, double lon) {
        double[] gps = transform(lat, lon);
        double lontitude = lon * 2 - gps[1];
        double latitude = lat * 2 - gps[0];
        return new double[]{latitude, lontitude};
    }

    /**
     * GCJ-02 -&gt; BD-09 (Baidu).
     *
     * @param lat GCJ-02 latitude
     * @param lon GCJ-02 longitude
     * @return {latitude, longitude} in BD-09
     */
    public static double[] gcj02_To_Bd09(double lat, double lon) {
        double x = lon, y = lat;
        double z = Math.sqrt(x * x + y * y) + 0.00002 * Math.sin(y * x_pi);
        double theta = Math.atan2(y, x) + 0.000003 * Math.cos(x * x_pi);
        double tempLon = z * Math.cos(theta) + 0.0065;
        double tempLat = z * Math.sin(theta) + 0.006;
        return new double[]{tempLat, tempLon};
    }

    /**
     * BD-09 (Baidu) -&gt; GCJ-02.
     *
     * @param lat BD-09 latitude
     * @param lon BD-09 longitude
     * @return {latitude, longitude} in GCJ-02
     */
    public static double[] bd09_To_Gcj02(double lat, double lon) {
        double x = lon - 0.0065, y = lat - 0.006;
        double z = Math.sqrt(x * x + y * y) - 0.00002 * Math.sin(y * x_pi);
        double theta = Math.atan2(y, x) - 0.000003 * Math.cos(x * x_pi);
        double tempLon = z * Math.cos(theta);
        double tempLat = z * Math.sin(theta);
        return new double[]{tempLat, tempLon};
    }

    /**
     * WGS-84 -&gt; BD-09 (via GCJ-02).
     *
     * @param lat WGS-84 latitude
     * @param lon WGS-84 longitude
     * @return {latitude, longitude} in BD-09
     */
    public static double[] gps84_To_bd09(double lat, double lon) {
        double[] gcj02 = gps84_To_Gcj02(lat, lon);
        return gcj02_To_Bd09(gcj02[0], gcj02[1]);
    }

    /**
     * BD-09 -&gt; WGS-84 (via GCJ-02), rounded to six decimal places.
     *
     * @param lat BD-09 latitude
     * @param lon BD-09 longitude
     * @return {latitude, longitude} in WGS-84
     */
    public static double[] bd09_To_gps84(double lat, double lon) {
        double[] gcj02 = bd09_To_Gcj02(lat, lon);
        double[] gps84 = gcj02_To_Gps84(gcj02[0], gcj02[1]);
        // keep six decimal places
        gps84[0] = retain6(gps84[0]);
        gps84[1] = retain6(gps84[1]);
        return gps84;
    }

    /**
     * Round to six decimal places.
     *
     * @param num value to round
     * @return value rounded to 6 decimals
     */
    private static double retain6(double num) {
        // Locale.ROOT guarantees '.' as the decimal separator; the default
        // locale could emit ',' and make Double.valueOf throw.
        String result = String.format(java.util.Locale.ROOT, "%.6f", num);
        return Double.valueOf(result);
    }
}

36
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/utils/LocalDateUtils.java

@ -1,6 +1,9 @@
package com.elink.esua.epdc.commons.tools.utils;
import com.elink.esua.epdc.commons.tools.constant.NumConstant;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.time.*;
import java.util.Date;
@ -48,4 +51,37 @@ public class LocalDateUtils {
return Date.from(instant);
}
/**
 * Hours between two {@link Date}s, as a decimal.
 *
 * @param start earlier point in time
 * @param end   later point in time
 * @param scale number of decimal places to keep
 * @return java.math.BigDecimal duration in hours
 * @author work@yujt.net.cn
 * @date 2020/4/3 17:27
 */
public static BigDecimal durationHours(Date start, Date end, int scale) {
return durationHours(dateToLocalDateTime(start), dateToLocalDateTime(end), scale);
}
/**
 * Hours between two points in time, as a decimal.
 * See {@link LocalDateUtils#durationHours(Date, Date, int)}.
 *
 * @param start earlier point in time
 * @param end   later point in time (result is negative when start is after end)
 * @param scale number of decimal places to keep
 * @return java.math.BigDecimal duration in hours, rounded half-up
 * @author work@yujt.net.cn
 * @date 2020/4/3 17:33
 */
public static BigDecimal durationHours(LocalDateTime start, LocalDateTime end, int scale) {
    // Duration.between is negative when start is after end
    Duration duration = Duration.between(start, end);
    // whole minutes between the two instants
    long minutes = duration.toMinutes();
    // RoundingMode.HALF_UP replaces the deprecated BigDecimal.ROUND_HALF_UP int constant
    return BigDecimal.valueOf(minutes)
            .divide(BigDecimal.valueOf(NumConstant.SIXTY), scale, RoundingMode.HALF_UP);
}
}

246
esua-epdc/epdc-gateway/pom.xml

@ -1,246 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.esua.epdc</groupId>
<artifactId>esua-epdc</artifactId>
<version>1.0.0</version>
</parent>
<artifactId>epdc-gateway</artifactId>
<packaging>jar</packaging>
<dependencies>
    <dependency>
        <groupId>com.esua.epdc</groupId>
        <artifactId>epdc-commons-tools</artifactId>
        <version>1.0.0</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-gateway</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba.cloud</groupId>
        <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-netflix-hystrix</artifactId>
    </dependency>
    <dependency>
        <groupId>de.codecentric</groupId>
        <artifactId>spring-boot-admin-starter-client</artifactId>
        <version>${spring.boot.admin.version}</version>
    </dependency>
    <!-- zipkin client -->
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-zipkin</artifactId>
    </dependency>
    <!-- was declared twice, once with a redundant <scope>compile</scope>
         (compile is the default); collapsed to a single declaration -->
    <dependency>
        <groupId>com.esua.epdc</groupId>
        <artifactId>epdc-common-clienttoken</artifactId>
        <version>1.0.0</version>
    </dependency>
</dependencies>
<build>
<finalName>${project.artifactId}</finalName>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>true</skipTests>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-deploy-plugin</artifactId>
<configuration>
<skip>true</skip>
</configuration>
</plugin>
<plugin>
<groupId>com.spotify</groupId>
<artifactId>dockerfile-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>dev</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<properties>
<server.port>9094</server.port>
<spring.profiles.active>dev</spring.profiles.active>
<docker.tag>dev</docker.tag>
<!-- redis配置 -->
<spring.redis.index>2</spring.redis.index>
<spring.redis.host>47.104.224.45</spring.redis.host>
<spring.redis.port>6379</spring.redis.port>
<spring.redis.password>elink@888</spring.redis.password>
<!-- gateway routes -->
<gateway.routes.epdc-auth-server.uri>lb://epdc-auth-server</gateway.routes.epdc-auth-server.uri>
<!-- <gateway.routes.epdc-admin-server.uri>lb://epdc-admin-server</gateway.routes.epdc-admin-server.uri>-->
<gateway.routes.epdc-admin-server.uri>http://127.0.0.1:9092</gateway.routes.epdc-admin-server.uri>
<gateway.routes.epdc-activiti-server.uri>lb://epdc-activiti-server
</gateway.routes.epdc-activiti-server.uri>
<gateway.routes.epdc-api-server.uri>lb://epdc-api-server</gateway.routes.epdc-api-server.uri>
<!-- <gateway.routes.epdc-api-server.uri>http://127.0.0.1:9040</gateway.routes.epdc-api-server.uri>-->
<gateway.routes.epdc-app-server.uri>lb://epdc-app-server</gateway.routes.epdc-app-server.uri>
<!-- <gateway.routes.epdc-app-server.uri>http://127.0.0.1:9058</gateway.routes.epdc-app-server.uri>-->
<gateway.routes.epdc-heart-server.uri>lb://epdc-heart-server</gateway.routes.epdc-heart-server.uri>
<!-- <gateway.routes.epdc-heart-server.uri>http://127.0.0.1:9060</gateway.routes.epdc-heart-server.uri>-->
<gateway.routes.epdc-job-server.uri>lb://epdc-job-server</gateway.routes.epdc-job-server.uri>
<!-- <gateway.routes.epdc-job-server.uri>http://127.0.0.1:9061</gateway.routes.epdc-job-server.uri>-->
<gateway.routes.epdc-message-server.uri>lb://epdc-message-server
</gateway.routes.epdc-message-server.uri>
<!--<gateway.routes.epdc-news-server.uri>lb://epdc-news-server</gateway.routes.epdc-news-server.uri>-->
<gateway.routes.epdc-news-server.uri>http://127.0.0.1:9064</gateway.routes.epdc-news-server.uri>
<gateway.routes.epdc-oss-server.uri>lb://epdc-oss-server</gateway.routes.epdc-oss-server.uri>
<gateway.routes.epdc-events-server.uri>lb://epdc-events-server</gateway.routes.epdc-events-server.uri>
<!-- <gateway.routes.epdc-events-server.uri>http://127.0.0.1:9066</gateway.routes.epdc-events-server.uri>-->
<gateway.routes.epdc-cloud-analysis-server.uri>http://127.0.0.1:9060</gateway.routes.epdc-cloud-analysis-server.uri>
<!-- <gateway.routes.epdc-cloud-analysis-server.uri>lb://epdc-cloud-analysis-server</gateway.routes.epdc-cloud-analysis-server.uri>-->
<gateway.routes.epdc-work-record-server.uri>http://127.0.0.1:9085</gateway.routes.epdc-work-record-server.uri>
<!-- <gateway.routes.epdc-work-record-server.uri>lb://epdc-work-record-server</gateway.routes.epdc-work-record-server.uri>-->
<gateway.routes.epdc-services-server.uri>lb://epdc-services-server
</gateway.routes.epdc-services-server.uri>
<!-- <gateway.routes.epdc-services-server.uri>http://127.0.0.1:9067</gateway.routes.epdc-services-server.uri>-->
<!--<gateway.routes.epdc-user-server.uri>lb://epdc-user-server</gateway.routes.epdc-user-server.uri>-->
<gateway.routes.epdc-user-server.uri>http://127.0.0.1:9068</gateway.routes.epdc-user-server.uri>
<gateway.routes.epdc-demo-server.uri>lb://epdc-demo-server</gateway.routes.epdc-demo-server.uri>
<gateway.routes.epdc-group-server.uri>http://127.0.0.1:9063</gateway.routes.epdc-group-server.uri>
<!--<gateway.routes.epdc-group-server.uri>lb://epdc-group-server</gateway.routes.epdc-group-server.uri>-->
<gateway.routes.epdc-websocket-server.uri>lb://epdc-websocket-server</gateway.routes.epdc-websocket-server.uri>
<gateway.routes.epdc-kpi-server.uri>lb://epdc-kpi-server</gateway.routes.epdc-kpi-server.uri>
<!-- <gateway.routes.epdc-custom-server.uri>http://127.0.0.1:9076</gateway.routes.epdc-kpi-server.uri>-->
<gateway.routes.epdc-custom-server.uri>lb://epdc-custom-server</gateway.routes.epdc-custom-server.uri>
<!-- <gateway.routes.epdc-analysis-server.uri>http://127.0.0.1:9077</gateway.routes.epdc-analysis-server.uri>-->
<gateway.routes.epdc-analysis-server.uri>lb://epdc-analysis-server</gateway.routes.epdc-analysis-server.uri>
<!-- nacos -->
<nacos.register-enabled>false</nacos.register-enabled>
<nacos.server-addr>47.104.224.45:8848</nacos.server-addr>
<nacos.ip></nacos.ip>
<nacos.namespace>6a3577b4-7b79-43f6-aebb-9c3f31263f6a</nacos.namespace>
<spring.zipkin.base-url>http://localhost:9411</spring.zipkin.base-url>
</properties>
</profile>
<profile>
<id>test</id>
<properties>
<server.port>10000</server.port>
<spring.profiles.active>test</spring.profiles.active>
<docker.tag>test</docker.tag>
<!-- redis配置 -->
<spring.redis.index>2</spring.redis.index>
<spring.redis.host>47.104.224.45</spring.redis.host>
<spring.redis.port>6379</spring.redis.port>
<spring.redis.password>elink@888</spring.redis.password>
<!-- gateway routes -->
<gateway.routes.epdc-auth-server.uri>lb://epdc-auth-server</gateway.routes.epdc-auth-server.uri>
<gateway.routes.epdc-admin-server.uri>lb://epdc-admin-server</gateway.routes.epdc-admin-server.uri>
<gateway.routes.epdc-activiti-server.uri>lb://epdc-activiti-server
</gateway.routes.epdc-activiti-server.uri>
<gateway.routes.epdc-api-server.uri>lb://epdc-api-server</gateway.routes.epdc-api-server.uri>
<gateway.routes.epdc-app-server.uri>lb://epdc-app-server</gateway.routes.epdc-app-server.uri>
<gateway.routes.epdc-heart-server.uri>lb://epdc-heart-server</gateway.routes.epdc-heart-server.uri>
<gateway.routes.epdc-job-server.uri>lb://epdc-job-server</gateway.routes.epdc-job-server.uri>
<gateway.routes.epdc-message-server.uri>lb://epdc-message-server
</gateway.routes.epdc-message-server.uri>
<gateway.routes.epdc-news-server.uri>lb://epdc-news-server</gateway.routes.epdc-news-server.uri>
<gateway.routes.epdc-oss-server.uri>lb://epdc-oss-server</gateway.routes.epdc-oss-server.uri>
<gateway.routes.epdc-events-server.uri>lb://epdc-events-server</gateway.routes.epdc-events-server.uri>
<gateway.routes.epdc-cloud-analysis-server.uri>lb://epdc-cloud-analysis-server</gateway.routes.epdc-cloud-analysis-server.uri>
<gateway.routes.epdc-work-record-server.uri>lb://epdc-work-record-server</gateway.routes.epdc-work-record-server.uri>
<gateway.routes.epdc-services-server.uri>lb://epdc-services-server
</gateway.routes.epdc-services-server.uri>
<gateway.routes.epdc-user-server.uri>lb://epdc-user-server</gateway.routes.epdc-user-server.uri>
<gateway.routes.epdc-demo-server.uri>lb://epdc-demo-server</gateway.routes.epdc-demo-server.uri>
<gateway.routes.epdc-group-server.uri>lb://epdc-group-server</gateway.routes.epdc-group-server.uri>
<gateway.routes.epdc-websocket-server.uri>lb://epdc-websocket-server</gateway.routes.epdc-websocket-server.uri>
<gateway.routes.epdc-kpi-server.uri>lb://epdc-kpi-server</gateway.routes.epdc-kpi-server.uri>
<gateway.routes.epdc-custom-server.uri>lb://epdc-custom-server</gateway.routes.epdc-custom-server.uri>
<gateway.routes.epdc-analysis-server.uri>lb://epdc-analysis-server</gateway.routes.epdc-analysis-server.uri>
<!-- nacos -->
<nacos.register-enabled>true</nacos.register-enabled>
<nacos.server-addr>47.104.224.45:8848</nacos.server-addr>
<nacos.ip>47.104.85.99</nacos.ip>
<nacos.namespace>6a3577b4-7b79-43f6-aebb-9c3f31263f6a</nacos.namespace>
<spring.zipkin.base-url>http://localhost:9411</spring.zipkin.base-url>
</properties>
</profile>
<profile>
<id>prod</id>
<properties>
<server.port>9094</server.port>
<spring.profiles.active>prod</spring.profiles.active>
<docker.tag>prod</docker.tag>
<!-- gateway routes -->
<gateway.routes.epdc-auth-server.uri>lb://epdc-auth-server</gateway.routes.epdc-auth-server.uri>
<gateway.routes.epdc-admin-server.uri>lb://epdc-admin-server</gateway.routes.epdc-admin-server.uri>
<gateway.routes.epdc-activiti-server.uri>lb://epdc-activiti-server</gateway.routes.epdc-activiti-server.uri>
<gateway.routes.epdc-api-server.uri>lb://epdc-api-server</gateway.routes.epdc-api-server.uri>
<gateway.routes.epdc-app-server.uri>lb://epdc-app-server</gateway.routes.epdc-app-server.uri>
<gateway.routes.epdc-heart-server.uri>lb://epdc-heart-server</gateway.routes.epdc-heart-server.uri>
<gateway.routes.epdc-job-server.uri>lb://epdc-job-server</gateway.routes.epdc-job-server.uri>
<gateway.routes.epdc-message-server.uri>lb://epdc-message-server</gateway.routes.epdc-message-server.uri>
<gateway.routes.epdc-news-server.uri>lb://epdc-news-server</gateway.routes.epdc-news-server.uri>
<gateway.routes.epdc-oss-server.uri>lb://epdc-oss-server</gateway.routes.epdc-oss-server.uri>
<gateway.routes.epdc-cloud-analysis-server.uri>lb://epdc-cloud-analysis-server</gateway.routes.epdc-cloud-analysis-server.uri>
<gateway.routes.epdc-work-record-server.uri>lb://epdc-work-record-server</gateway.routes.epdc-work-record-server.uri>
<gateway.routes.epdc-events-server.uri>lb://epdc-events-server</gateway.routes.epdc-events-server.uri>
<gateway.routes.epdc-services-server.uri>lb://epdc-services-server</gateway.routes.epdc-services-server.uri>
<gateway.routes.epdc-user-server.uri>lb://epdc-user-server</gateway.routes.epdc-user-server.uri>
<gateway.routes.epdc-demo-server.uri>lb://epdc-demo-server</gateway.routes.epdc-demo-server.uri>
<gateway.routes.epdc-group-server.uri>lb://epdc-group-server</gateway.routes.epdc-group-server.uri>
<gateway.routes.epdc-websocket-server.uri>lb://epdc-websocket-server</gateway.routes.epdc-websocket-server.uri>
<gateway.routes.epdc-kpi-server.uri>lb://epdc-kpi-server</gateway.routes.epdc-kpi-server.uri>
<gateway.routes.epdc-custom-server.uri>lb://epdc-custom-server</gateway.routes.epdc-custom-server.uri>
<gateway.routes.epdc-analysis-server.uri>lb://epdc-analysis-server</gateway.routes.epdc-analysis-server.uri>
<!-- redis配置 -->
<spring.redis.index>0</spring.redis.index>
<spring.redis.host>172.16.0.54</spring.redis.host>
<spring.redis.port>6379</spring.redis.port>
<spring.redis.password>Elink833066</spring.redis.password>
<!-- nacos -->
<nacos.register-enabled>true</nacos.register-enabled>
<nacos.server-addr>172.16.0.52:8848</nacos.server-addr>
<nacos.ip></nacos.ip>
<nacos.namespace></nacos.namespace>
<spring.zipkin.base-url>http://localhost:9411</spring.zipkin.base-url>
</properties>
</profile>
</profiles>
</project>

69
esua-epdc/epdc-gateway/src/main/java/com/elink/esua/epdc/config/CorsConfig.java

@@ -1,69 +0,0 @@
/**
* Copyright (c) 2018 人人开源 All rights reserved.
* <p>
* https://www.renren.io
* <p>
* 版权所有侵权必究
*/
package com.elink.esua.epdc.config;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.web.ServerProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.http.server.reactive.ServerHttpResponse;
import org.springframework.web.cors.reactive.CorsUtils;
import org.springframework.web.server.ServerWebExchange;
import org.springframework.web.server.WebFilter;
import org.springframework.web.server.WebFilterChain;
import reactor.core.publisher.Mono;
/**
 * Reactive CORS filter for the gateway.
 *
 * <p>Decorates every cross-origin request with CORS response headers — except requests under
 * the websocket context path ("/ws"), which manage their own origin handling — and
 * short-circuits OPTIONS preflight requests with 200 OK.
 *
 * @author Mark sunlightcs@gmail.com
 * @since 1.0.0
 */
@Configuration
public class CorsConfig {

    /**
     * Preflight cache duration in seconds. Fix: the previous value "18000L" leaked a Java
     * long-literal suffix into the header text; Access-Control-Max-Age must be a plain
     * integer, otherwise browsers reject it and re-send a preflight for every request.
     */
    private static final String MAX_AGE = "18000";

    @Autowired
    private ServerProperties serverProperties;

    /**
     * Builds the CORS {@link WebFilter} bean applied to all gateway exchanges.
     *
     * @return filter that adds CORS headers to cross-origin requests
     */
    @Bean
    public WebFilter corsFilter() {
        return (ServerWebExchange ctx, WebFilterChain chain) -> {
            ServerHttpRequest request = ctx.getRequest();
            // Same-origin (non-CORS) requests pass through untouched.
            if (!CorsUtils.isCorsRequest(request)) {
                return chain.filter(ctx);
            }
            HttpHeaders requestHeaders = request.getHeaders();
            ServerHttpResponse response = ctx.getResponse();
            HttpMethod requestMethod = requestHeaders.getAccessControlRequestMethod();
            HttpHeaders headers = response.getHeaders();
            // Websocket endpoints under "/ws" are excluded from origin/credentials echoing.
            if (!request.getURI().getPath().startsWith(serverProperties.getServlet().getContextPath().concat("/ws"))) {
                headers.add(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, requestHeaders.getOrigin());
                headers.add(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
            }
            headers.addAll(HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS, requestHeaders.getAccessControlRequestHeaders());
            if (requestMethod != null) {
                headers.add(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, requestMethod.name());
            }
            headers.add(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS, "*");
            headers.add(HttpHeaders.ACCESS_CONTROL_MAX_AGE, MAX_AGE);
            // Preflight requests are answered directly; no downstream routing.
            if (request.getMethod() == HttpMethod.OPTIONS) {
                response.setStatusCode(HttpStatus.OK);
                return Mono.empty();
            }
            return chain.filter(ctx);
        };
    }
}

54
esua-epdc/epdc-gateway/src/main/java/com/elink/esua/epdc/feign/ResourceFeignClient.java

@@ -1,54 +0,0 @@
/**
* Copyright (c) 2018 人人开源 All rights reserved.
* <p>
* https://www.renren.io
* <p>
* 版权所有侵权必究
*/
package com.elink.esua.epdc.feign;
import com.elink.esua.epdc.common.token.dto.TokenDto;
import com.elink.esua.epdc.feign.fallback.ResourceFeignClientFallback;
import com.elink.esua.epdc.commons.tools.constant.ServiceConstant;
import com.elink.esua.epdc.commons.tools.security.user.UserDetail;
import com.elink.esua.epdc.commons.tools.utils.Result;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.http.HttpHeaders;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestHeader;
import org.springframework.web.bind.annotation.RequestParam;
/**
 * Feign client for the authentication service ({@code ServiceConstant.EPDC_AUTH_SERVER}).
 * Used by the gateway to check resource permissions and resolve the logged-in user;
 * degrades to {@link ResourceFeignClientFallback} when the auth service is unavailable.
 *
 * @author Mark sunlightcs@gmail.com
 * @since 1.0.0
 */
@FeignClient(name = ServiceConstant.EPDC_AUTH_SERVER, fallback = ResourceFeignClientFallback.class)
public interface ResourceFeignClient {
/**
 * Checks whether the given token grants access to the given resource.
 *
 * @param language Accept-Language header value, forwarded so the auth service can localize errors
 * @param token    user token
 * @param url      resource URL being accessed
 * @param method   HTTP method of the request
 * @return the authenticated user's details when access is granted; an error result otherwise
 */
@PostMapping("auth/resource")
Result<UserDetail> resource(@RequestHeader(HttpHeaders.ACCEPT_LANGUAGE) String language, @RequestParam("token") String token,
@RequestParam("url") String url, @RequestParam("method") String method);
/**
 * Fetches the token information of the currently logged-in user.
 *
 * @param token user token
 * @return token details for the user the token belongs to
 * @date 2019/8/19 17:19
 */
@GetMapping("auth/getLoginUserInfo")
Result<TokenDto> getLoginUserInfo(@RequestParam("token") String token);
}

35
esua-epdc/epdc-gateway/src/main/java/com/elink/esua/epdc/feign/fallback/ResourceFeignClientFallback.java

@@ -1,35 +0,0 @@
/**
* Copyright (c) 2018 人人开源 All rights reserved.
* <p>
* https://www.renren.io
* <p>
* 版权所有侵权必究
*/
package com.elink.esua.epdc.feign.fallback;
import com.elink.esua.epdc.common.token.dto.TokenDto;
import com.elink.esua.epdc.commons.tools.security.user.UserDetail;
import com.elink.esua.epdc.commons.tools.utils.Result;
import com.elink.esua.epdc.feign.ResourceFeignClient;
import org.springframework.stereotype.Component;
/**
 * Fallback for {@link ResourceFeignClient}: every call reports a generic error result,
 * so the gateway degrades gracefully instead of failing when the auth service is down.
 *
 * @author Mark sunlightcs@gmail.com
 * @since 1.0.0
 */
@Component
public class ResourceFeignClientFallback implements ResourceFeignClient {

    @Override
    public Result<UserDetail> resource(String language, String token, String url, String method) {
        // Auth service unreachable: deny by returning a generic error result.
        Result<UserDetail> failure = new Result<>();
        return failure.error();
    }

    @Override
    public Result<TokenDto> getLoginUserInfo(String token) {
        // Auth service unreachable: no user information can be resolved.
        Result<TokenDto> failure = new Result<>();
        return failure.error();
    }
}

154
esua-epdc/epdc-gateway/src/main/java/com/elink/esua/epdc/filter/AuthFilter.java

@@ -1,154 +0,0 @@
/**
* Copyright (c) 2018 人人开源 All rights reserved.
*
* https://www.renren.io
*
* 版权所有侵权必究
*/
package com.elink.esua.epdc.filter;
import com.alibaba.fastjson.JSON;
import com.elink.esua.epdc.feign.ResourceFeignClient;
import com.elink.esua.epdc.commons.tools.constant.Constant;
import com.elink.esua.epdc.commons.tools.security.user.UserDetail;
import com.elink.esua.epdc.commons.tools.utils.Result;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.gateway.filter.GatewayFilterChain;
import org.springframework.cloud.gateway.filter.GlobalFilter;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.buffer.DataBuffer;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.util.AntPathMatcher;
import org.springframework.web.server.ServerWebExchange;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import java.nio.charset.StandardCharsets;
import java.util.List;
/**
 * Global authentication filter for the gateway.
 *
 * <p>Every request not matched by the whitelist is checked against the auth service:
 * the user's token (from header or query parameter) is validated for the requested URI,
 * and on success the user's id is propagated downstream in the {@code Constant.USER_KEY}
 * header. URL pattern lists are bound from the {@code renren.*} configuration prefix.
 *
 * @author Mark sunlightcs@gmail.com
 * @since 1.0.0
 */
@Configuration
@ConfigurationProperties(prefix = "renren")
public class AuthFilter implements GlobalFilter {

    private final AntPathMatcher antPathMatcher = new AntPathMatcher();

    @Autowired
    private ResourceFeignClient resourceFeignClient;

    /**
     * URL patterns exempt from authentication.
     */
    private List<String> urls;

    /**
     * Work-side login URL patterns exempt from authentication.
     */
    private List<String> workLoginUrls;

    /**
     * Work-side URL patterns that always require authentication.
     */
    private List<String> workUrls;

    @Override
    public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
        ServerHttpRequest request = exchange.getRequest();
        String requestUri = request.getPath().pathWithinApplication().value();
        // Whitelisted path: pass through without a permission check.
        if (pathMatcher(requestUri)) {
            return chain.filter(exchange);
        }
        String token = resolveToken(request);
        // Ask the auth service whether this token may access the resource.
        // getMethodValue() is used instead of getMethod().toString(), which throws an NPE
        // for non-standard HTTP methods (getMethod() returns null for unknown verbs).
        String language = request.getHeaders().getFirst(HttpHeaders.ACCEPT_LANGUAGE);
        Result<UserDetail> result = resourceFeignClient.resource(language, token, requestUri, request.getMethodValue());
        // Access denied: return the error payload directly.
        if (!result.success()) {
            return response(exchange, result);
        }
        UserDetail userDetail = result.getData();
        if (userDetail != null) {
            // Propagate the authenticated user's id downstream via header.
            ServerHttpRequest build = exchange.getRequest().mutate().header(Constant.USER_KEY, userDetail.getId() + "").build();
            return chain.filter(exchange.mutate().request(build).build());
        }
        return chain.filter(exchange);
    }

    /**
     * Extracts the user token: token header first, then the authorization header,
     * then the token query parameter. May return null when no token was sent.
     */
    private String resolveToken(ServerHttpRequest request) {
        String token = request.getHeaders().getFirst(Constant.TOKEN_HEADER);
        if (StringUtils.isBlank(token)) {
            token = request.getHeaders().getFirst(Constant.AUTHORIZATION_HEADER);
            if (StringUtils.isBlank(token)) {
                token = request.getQueryParams().getFirst(Constant.TOKEN_HEADER);
            }
        }
        return token;
    }

    /**
     * Writes the given object as a JSON body with HTTP 200 and terminates the exchange.
     */
    private Mono<Void> response(ServerWebExchange exchange, Object object) {
        String json = JSON.toJSONString(object);
        DataBuffer buffer = exchange.getResponse().bufferFactory().wrap(json.getBytes(StandardCharsets.UTF_8));
        exchange.getResponse().getHeaders().setContentType(MediaType.APPLICATION_JSON_UTF8);
        exchange.getResponse().setStatusCode(HttpStatus.OK);
        return exchange.getResponse().writeWith(Flux.just(buffer));
    }

    /**
     * Returns true when the URI is exempt from authentication. Order matters:
     * work-side login whitelist first, then the work-side blacklist, then the
     * general whitelist; anything unmatched requires authentication.
     */
    private boolean pathMatcher(String requestUri) {
        if (matchesAny(workLoginUrls, requestUri)) {
            return true;
        }
        if (matchesAny(workUrls, requestUri)) {
            return false;
        }
        return matchesAny(urls, requestUri);
    }

    /**
     * Null-safe "does any pattern match" check. Fix: the original iterated the lists
     * unconditionally and threw an NPE whenever one of the renren.* url lists was
     * absent from configuration.
     */
    private boolean matchesAny(List<String> patterns, String requestUri) {
        if (patterns == null) {
            return false;
        }
        for (String pattern : patterns) {
            if (antPathMatcher.match(pattern, requestUri)) {
                return true;
            }
        }
        return false;
    }

    public List<String> getUrls() {
        return urls;
    }

    public void setUrls(List<String> urls) {
        this.urls = urls;
    }

    public List<String> getWorkLoginUrls() {
        return workLoginUrls;
    }

    public void setWorkLoginUrls(List<String> workLoginUrls) {
        this.workLoginUrls = workLoginUrls;
    }

    public List<String> getWorkUrls() {
        return workUrls;
    }

    public void setWorkUrls(List<String> workUrls) {
        this.workUrls = workUrls;
    }
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save