Browse Source

Merge branch 'feature/governanceRanking' of http://121.42.41.42:7070/r/esua-epdc-cloud into feature/governanceRanking

 Conflicts:
	esua-epdc/pom.xml
feature/dangjian
管理员 6 years ago
parent
commit
f6e81264e7
  1. 3
      .gitmodules
  2. 1
      esua-epdc-cloud/epdc-cloud-client-shibei
  3. 1
      esua-epdc-cloud/epdc-cloud-gateway-shibei
  4. 1
      esua-epdc-cloud/epdc-cloud-parent-shibei
  5. 52
      esua-epdc/doc/db/dev_youhua/esua_epdc_user.sql
  6. 14
      esua-epdc/docker-compose/jinshui/app/admin/docker-compose.yml
  7. 14
      esua-epdc/docker-compose/jinshui/app/api/docker-compose.yml
  8. 14
      esua-epdc/docker-compose/jinshui/app/auth/docker-compose.yml
  9. 14
      esua-epdc/docker-compose/jinshui/app/events/docker-compose.yml
  10. 14
      esua-epdc/docker-compose/jinshui/app/gateway/docker-compose.yml
  11. 14
      esua-epdc/docker-compose/jinshui/app/group/docker-compose.yml
  12. 14
      esua-epdc/docker-compose/jinshui/app/message/docker-compose.yml
  13. 14
      esua-epdc/docker-compose/jinshui/app/news/docker-compose.yml
  14. 14
      esua-epdc/docker-compose/jinshui/app/oss/docker-compose.yml
  15. 14
      esua-epdc/docker-compose/jinshui/app/user/docker-compose.yml
  16. 14
      esua-epdc/docker-compose/jinshui/app/websocket/docker-compose.yml
  17. 45
      esua-epdc/docker-compose/jinshui/node03/1-mysql/conf/mysql.conf.cnf
  18. 21
      esua-epdc/docker-compose/jinshui/node03/1-mysql/docker-compose.yml
  19. 20
      esua-epdc/docker-compose/jinshui/node03/2-nginx/docker-compose.yml
  20. 27
      esua-epdc/docker-compose/jinshui/node03/3-nacos/docker-compose.yml
  21. 17
      esua-epdc/docker-compose/jinshui/node03/4-redis/docker-compose.yml
  22. 9
      esua-epdc/docker-compose/jinshui/node03/4-redis/sentinel/conf/sentinel1.conf
  23. 18
      esua-epdc/docker-compose/jinshui/node03/4-redis/sentinel/docker-compose.yml
  24. 73
      esua-epdc/docker-compose/jinshui/node03/5-seata/conf/registry.conf
  25. 25
      esua-epdc/docker-compose/jinshui/node03/5-seata/docker-compose.yml
  26. 9
      esua-epdc/docker-compose/jinshui/node03/Readme.md
  27. 36
      esua-epdc/docker-compose/jinshui/node04/1-mysql/conf/mysql.conf.cnf
  28. 22
      esua-epdc/docker-compose/jinshui/node04/1-mysql/docker-compose.yml
  29. 50
      esua-epdc/docker-compose/jinshui/node04/2-nacos/docker-compose.yml
  30. 73
      esua-epdc/docker-compose/jinshui/node04/3-seata/conf/registry.conf
  31. 25
      esua-epdc/docker-compose/jinshui/node04/3-seata/docker-compose.yml
  32. 81
      esua-epdc/docker-compose/jinshui/node04/3-seata/script/config.txt
  33. 89
      esua-epdc/docker-compose/jinshui/node04/3-seata/script/nacos-config.sh
  34. 29
      esua-epdc/docker-compose/jinshui/node04/4-redis/docker-compose.yml
  35. 9
      esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/conf/sentinel1.conf
  36. 9
      esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/conf/sentinel2.conf
  37. 31
      esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/docker-compose.yml
  38. 8
      esua-epdc/docker-compose/jinshui/node04/Readme.md
  39. 39
      esua-epdc/docker-compose/jinshui/node04/fastdfs/Dockerfile
  40. 45
      esua-epdc/docker-compose/jinshui/node04/fastdfs/README.md
  41. 63
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/client.conf
  42. 29
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/http.conf
  43. 1065
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/mime.types
  44. 134
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/mod_fastdfs.conf
  45. 127
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/nginx.conf
  46. 287
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/storage.conf
  47. 278
      esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/tracker.conf
  48. 26
      esua-epdc/docker-compose/jinshui/node04/fastdfs/fastdfs.sh
  49. 47
      esua-epdc/docker-compose/jinshui/node04/fastdfs/nginx/docker-compose.yml
  50. 47
      esua-epdc/docker-compose/jinshui/node04/fastdfs/storage/docker-compose.yml
  51. 47
      esua-epdc/docker-compose/jinshui/node04/fastdfs/tracker/docker-compose.yml
  52. 14
      esua-epdc/docker-compose/prod/master/application/10.5.34.162-master/docker-compose.yml
  53. 50
      esua-epdc/docker-compose/prod/master/picture/10.5.34.166/docker-compose.yml
  54. 25
      esua-epdc/docker-compose/prod/node01/3-nacos/docker-compose.yml
  55. 73
      esua-epdc/docker-compose/prod/node01/4-seata/conf/registry.conf
  56. 39
      esua-epdc/docker-compose/prod/node01/4-seata/docker-compose.yml
  57. 84
      esua-epdc/docker-compose/prod/node01/4-seata/script/config.txt
  58. 89
      esua-epdc/docker-compose/prod/node01/4-seata/script/nacos-config.sh
  59. 48
      esua-epdc/docker-compose/prod/node02/2-nacos/docker-compose.yml
  60. 73
      esua-epdc/docker-compose/prod/node02/5-seata/conf/registry.conf
  61. 21
      esua-epdc/docker-compose/prod/node02/5-seata/docker-compose.yml
  62. 39
      esua-epdc/docker-compose/test/node01/1-mysql/conf/mysql.conf.cnf
  63. 1
      esua-epdc/docker-compose/test/node01/2-nginx/docker-compose.yml
  64. 1
      esua-epdc/docker-compose/test/node01/3-nacos/docker-compose.yml
  65. 3
      esua-epdc/docker-compose/test/node01/4-redis/docker-compose.yml
  66. 1
      esua-epdc/docker-compose/test/node01/5-seata/docker-compose.yml
  67. 35
      esua-epdc/docker-compose/test/node02/1-mysql/conf/mysql.conf.cnf
  68. 3
      esua-epdc/docker-compose/test/node02/1-mysql/docker-compose.yml
  69. 4
      esua-epdc/docker-compose/test/node02/2-nacos/docker-compose.yml
  70. 2
      esua-epdc/docker-compose/test/node02/3-seata/docker-compose.yml
  71. 6
      esua-epdc/docker-compose/test/node02/4-redis/docker-compose.yml
  72. 4
      esua-epdc/epdc-admin/epdc-admin-client/src/main/java/com/elink/esua/epdc/dto/AppMenuDTO.java
  73. 61
      esua-epdc/epdc-admin/epdc-admin-client/src/main/java/com/elink/esua/epdc/dto/DeptGridDTO.java
  74. 5
      esua-epdc/epdc-admin/epdc-admin-client/src/main/java/com/elink/esua/epdc/dto/SysAnalysisMenuDTO.java
  75. 2
      esua-epdc/epdc-admin/epdc-admin-server/Dockerfile
  76. 14
      esua-epdc/epdc-admin/epdc-admin-server/pom.xml
  77. 25
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/controller/SysUserController.java
  78. 8
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/dao/SysDeptDao.java
  79. 2
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/dao/SysRoleUserDao.java
  80. 1
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/entity/SysAnalysisMenuEntity.java
  81. 4
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/SysDeptService.java
  82. 6
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/SysRoleUserService.java
  83. 146
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/impl/SysDeptServiceImpl.java
  84. 5
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/impl/SysRoleDataScopeServiceImpl.java
  85. 2
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/impl/SysRoleServiceImpl.java
  86. 5
      esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/impl/SysRoleUserServiceImpl.java
  87. 1
      esua-epdc/epdc-admin/epdc-admin-server/src/main/resources/application.yml
  88. 26
      esua-epdc/epdc-admin/epdc-admin-server/src/main/resources/mapper/SysDeptDao.xml
  89. 14
      esua-epdc/epdc-admin/epdc-admin-server/src/main/resources/mapper/SysRoleUserDao.xml
  90. 2
      esua-epdc/epdc-auth/Dockerfile
  91. 10
      esua-epdc/epdc-auth/src/main/resources/logback-spring.xml
  92. 6
      esua-epdc/epdc-commons/epdc-common-clienttoken/pom.xml
  93. 12
      esua-epdc/epdc-commons/epdc-common-clienttoken/src/main/java/com/elink/esua/epdc/common/token/dto/TokenDto.java
  94. 20
      esua-epdc/epdc-commons/epdc-common-clienttoken/src/main/java/com/elink/esua/epdc/common/token/util/CpUserDetailRedis.java
  95. 36
      esua-epdc/epdc-commons/epdc-commons-mybatis/src/main/java/com/elink/esua/epdc/commons/mybatis/entity/DeptScope.java
  96. 74
      esua-epdc/epdc-commons/epdc-commons-mybatis/src/main/java/com/elink/esua/epdc/commons/mybatis/utils/DeptEntityUtils.java
  97. 2
      esua-epdc/epdc-commons/epdc-commons-tools-wx-ma/pom.xml
  98. 12
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/NacosConfigConstant.java
  99. 1
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/NumConstant.java
  100. 13
      esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/PointsConstant.java

3
.gitmodules

@@ -0,0 +1,3 @@
[submodule "esua-epdc/epdc-cloud-gateway-shibei"]
path = esua-epdc/epdc-cloud-gateway-shibei
url = http://121.42.41.42:7070/r/epdc-cloud-gateway-shibei.git

1
esua-epdc-cloud/epdc-cloud-client-shibei

@ -0,0 +1 @@
Subproject commit ca9f08fb7691c3e01e475c42fa5ca54d98d13ced

1
esua-epdc-cloud/epdc-cloud-gateway-shibei

@ -0,0 +1 @@
Subproject commit df286464819c7ec8d57c3744890289b905bd73a1

1
esua-epdc-cloud/epdc-cloud-parent-shibei

@ -0,0 +1 @@
Subproject commit 5bbc8bdcdbad628c32c27c39d95c5a23f6cc213d

52
esua-epdc/doc/db/dev_youhua/esua_epdc_user.sql

@@ -0,0 +1,52 @@
-- -------- 线 (section marker; label appears truncated in the original) --------
-- Migration script for schema esua_epdc_user (dev_youhua).
-- Adjusts two columns on epdc_party_authentication_failed, adds an
-- authentication-type column to the history table, and creates the
-- handle-category / role-category / role-dept permission tables.
ALTER TABLE esua_epdc_user.epdc_party_authentication_failed MODIFY COLUMN `STATE` VARCHAR(1) NULL COMMENT '状态 0-认证失败';
-- NOTE(review): numeric DEFAULT 0 on a varchar column — MySQL coerces it to '0';
-- consider quoting as DEFAULT '0' for clarity.
ALTER TABLE esua_epdc_user.epdc_party_authentication_failed MODIFY COLUMN `CADRE_FLAG` varchar(1) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT 0 NULL COMMENT '干部下沉标识 0-否,1-是';
ALTER TABLE esua_epdc_user.epdc_user_authenticate_history ADD AUTHENTICATED_TYPE varchar(2) NOT NULL COMMENT '认证类别(0-居民认证,1-党员认证,2-志愿者认证)';
-- ------------------------------------------------
CREATE TABLE `epdc_handle_category` (
  `ID` varchar(32) NOT NULL COMMENT '主键',
  `CATEGORY_VAL` int(11) NOT NULL COMMENT '处理类别值',
  `CATEGORY_LABEL` varchar(20) NOT NULL COMMENT '处理类别显示信息',
  `AVAILABLE` varchar(1) NOT NULL DEFAULT '1' COMMENT '可用状态(0-不可用,1-可用)',
  `SORT` int(11) NOT NULL DEFAULT '0' COMMENT '排序',
  `REVISION` int(11) DEFAULT NULL COMMENT '乐观锁',
  `CREATED_BY` varchar(32) DEFAULT NULL COMMENT '创建人',
  `CREATED_TIME` datetime DEFAULT NULL COMMENT '创建时间',
  `UPDATED_BY` varchar(32) DEFAULT NULL COMMENT '更新人',
  `UPDATED_TIME` datetime DEFAULT NULL COMMENT '更新时间',
  `DEL_FLAG` varchar(1) NOT NULL COMMENT '删除标识 0:未删除,1:已删除',
  `CATEGORY_CODE` varchar(32) NOT NULL COMMENT '处理类型编码',
  PRIMARY KEY (`ID`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='处理类别表';
-- ^ terminator added: the original omitted the ';' here, which would make the
--   following CREATE TABLE a syntax error when the script is run as a batch.
CREATE TABLE `epdc_role_category` (
  `ID` varchar(32) NOT NULL COMMENT '主键',
  `ROLE_ID` varchar(32) NOT NULL COMMENT '角色ID',
  `CATEGORY_ID` varchar(32) NOT NULL COMMENT '处理类别ID',
  `REVISION` int(11) DEFAULT NULL COMMENT '乐观锁',
  `CREATED_BY` varchar(32) DEFAULT NULL COMMENT '创建人',
  `CREATED_TIME` datetime DEFAULT NULL COMMENT '创建时间',
  `UPDATED_BY` varchar(32) DEFAULT NULL COMMENT '更新人',
  `UPDATED_TIME` datetime DEFAULT NULL COMMENT '更新时间',
  `DEL_FLAG` varchar(1) NOT NULL COMMENT '删除标识 0:未删除,1:已删除',
  PRIMARY KEY (`ID`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='角色和处理类别关系表';
-- NOTE(review): the table created above is named `epdc_role_category`, but the
-- ALTER below targets EPDC_HANDLE_ROLE_CATEGORY — one of the two names is
-- likely stale; confirm which is intended before running this statement.
ALTER TABLE EPDC_HANDLE_ROLE_CATEGORY COMMENT '角色和处理类别关系表';
CREATE TABLE EPDC_HANDLE_ROLE_DEPT(
  ID VARCHAR(32) NOT NULL COMMENT '主键' ,
  ROLE_ID VARCHAR(32) NOT NULL COMMENT '角色ID' ,
  DEPT_ID VARCHAR(32) NOT NULL COMMENT '部门ID' ,
  DEPT_TYPE VARCHAR(50) NOT NULL COMMENT '部门机构类型' ,
  REVISION INT COMMENT '乐观锁' ,
  CREATED_BY VARCHAR(32) COMMENT '创建人' ,
  CREATED_TIME DATETIME COMMENT '创建时间' ,
  UPDATED_BY VARCHAR(32) COMMENT '更新人' ,
  UPDATED_TIME DATETIME COMMENT '更新时间' ,
  PRIMARY KEY (ID)
) COMMENT = '处理部门角色权限表 处理部门角色权限表';
-- ^ doubled ';;' collapsed to ';' (here and on the ALTER statements);
--   the duplicated table comment text is preserved as in the original.
ALTER TABLE EPDC_HANDLE_ROLE_DEPT COMMENT '处理部门角色权限表';

14
esua-epdc/docker-compose/jinshui/app/admin/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-admin-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-admin-server:prod
container_name: epdc-admin-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.41
volumes:
- /mnt/epdc/app/admin/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/api/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-api-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-api-server:prod
container_name: epdc-api-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.42
volumes:
- /mnt/epdc/app/api/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/auth/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-auth:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-auth:prod
container_name: epdc-auth-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.43
volumes:
- /mnt/epdc/app/auth/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/events/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-events-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-events-server:prod
container_name: epdc-events-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.44
volumes:
- /mnt/epdc/app/events/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/gateway/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-gateway:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-gateway:prod
container_name: epdc-gateway-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.40
volumes:
- /mnt/epdc/app/gateway/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/group/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-group-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-group-server:prod
container_name: epdc-group-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.45
volumes:
- /mnt/epdc/app/group/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/message/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-message-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-message-server:prod
container_name: epdc-message-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.46
volumes:
- /mnt/epdc/app/message/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/news/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-news-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-news-server:prod
container_name: epdc-news-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.47
volumes:
- /mnt/epdc/app/news/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/oss/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-oss-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-oss-server:prod
container_name: epdc-oss-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.48
volumes:
- /mnt/epdc/app/oss/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/user/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-user-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-user-server:prod
container_name: epdc-user-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.49
volumes:
- /mnt/epdc/app/user/logs:/logs
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/jinshui/app/websocket/docker-compose.yml

@ -0,0 +1,14 @@
version: '3.7'
services:
epdc-websocket-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-websocket-server:prod
container_name: epdc-websocket-server-01
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.50
volumes:
- /mnt/epdc/app/websocket/logs:/logs
networks:
epdc_network:
external: true

45
esua-epdc/docker-compose/jinshui/node03/1-mysql/conf/mysql.conf.cnf

@ -0,0 +1,45 @@
[client]
default-character-set=utf8
[mysqld]
character-set-server=utf8
##### 这两部操作可以解决mysql连接很慢的问题 #####
# 根据官方文档说明,如果你的mysql主机查询DNS很慢或是有很多客户端主机时会导致连接很慢,由于我们的开发机器是不能够连接外网的,
# 所以DNS解析是不可能完成的,从而也就明白了为什么连接那么慢了。同时,请注意在增加该配置参数后,mysql的授权表中的host字段就
# 不能够使用域名而只能够使用 ip地址了,因为这是禁止了域名解析的结果。
# 1.禁止域名解析
skip-host-cache
# 2.禁用dns解析,但是,这样不能在mysql的授权表中使用主机名了,只能使用IP。
skip-name-resolve
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分
server-id = 21
# mysql日志
log_bin = /var/lib/mysql/mysql-bin.log
log-bin-index=slave-relay-bin.index
#日志记录的格式
binlog_format=MIXED
#单个日志文件最大
max_binlog_size = 512M 
#从库建议开启,有利于数据一致性
relay_log_recovery = 1   
#如果从库还会用做主库,建议开启
log_slave_updates = 1   
# 中继日志:存储所有主库TP过来的binlog事件主库binlog:记录主库发生过的修改事件
# relay-log = /var/lib/mysql/mysql-relay-bin.log
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION

21
esua-epdc/docker-compose/jinshui/node03/1-mysql/docker-compose.yml

@ -0,0 +1,21 @@
version: "3.7"
services:
mysql-slave:
container_name: mysql-slave
image: mysql:5.7
environment:
TZ: Asia/Shanghai
MYSQL_ROOT_PASSWORD: epdc!elink1405
MYSQL_LOWER_CASE_TABLE_NAMES: 1
volumes:
- /etc/localtime:/etc/localtime
- /etc/timezone:/etc/timezone
- /mnt/epdc/mysql/data:/var/lib/mysql
- /mnt/epdc/mysql/conf/mysql.conf.cnf:/etc/mysql/conf.d/mysql.conf.cnf
restart: always
networks:
epdc_network:
ipv4_address: 172.20.0.3
networks:
epdc_network:
external: true

20
esua-epdc/docker-compose/jinshui/node03/2-nginx/docker-compose.yml

@ -0,0 +1,20 @@
version: "3.7"
services:
web:
image: nginx
ports:
- 80:80
- 443:443
volumes:
- /mnt/epdc/nginx/html:/usr/share/nginx/html
- /mnt/epdc/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro
- /mnt/epdc/nginx/conf.d:/etc/nginx/conf.d:ro
- /mnt/epdc/nginx/logs:/var/log/nginx
restart: always
container_name: nginx_master
networks:
epdc_network:
ipv4_address: 172.20.0.4
networks:
epdc_network:
external: true

27
esua-epdc/docker-compose/jinshui/node03/3-nacos/docker-compose.yml

@ -0,0 +1,27 @@
version: "3.7"
services:
nacos3:
image: nacos/nacos-server:latest
container_name: nacos3
networks:
epdc_network:
ipv4_address: 172.20.0.5
volumes:
- /mnt/epdc/nacos/logs:/home/nacos/logs
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_IP: 172.20.0.5 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点
MYSQL_MASTER_SERVICE_PORT: 3306
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos
MYSQL_MASTER_SERVICE_USER: nacos
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3
MYSQL_SLAVE_SERVICE_PORT: 3306
restart: always
networks:
epdc_network:
external: true

17
esua-epdc/docker-compose/jinshui/node03/4-redis/docker-compose.yml

@ -0,0 +1,17 @@
version: '3.7'
services:
slave2:
image: redis
container_name: redis-slave-2
command: redis-server --slaveof 172.19.0.11 6379 --requirepass epdc!redis@slave1405 --masterauth epdc!redis@master1405 --logfile /data/log/redis-slave2.log
restart: always
volumes:
- /mnt/epdc/redis/log:/data/log
- /mnt/epdc/redis/data:/data
networks:
epdc_network:
ipv4_address: 172.20.0.11
networks:
epdc_network:
external: true

9
esua-epdc/docker-compose/jinshui/node03/4-redis/sentinel/conf/sentinel1.conf

@ -0,0 +1,9 @@
port 26379
logfile "/usr/local/redis/sentinel/log/sentinel.log"
dir "/usr/local/redis/sentinel"
sentinel monitor epdcmaster 172.19.0.11 6379 2
sentinel down-after-milliseconds epdcmaster 30000
sentinel parallel-syncs epdcmaster 1
sentinel failover-timeout epdcmaster 180000
sentinel deny-scripts-reconfig yes
sentinel auth-pass epdcmaster epdc!redis@master1405

18
esua-epdc/docker-compose/jinshui/node03/4-redis/sentinel/docker-compose.yml

@ -0,0 +1,18 @@
version: '3.7'
services:
sentinel2:
image: redis
container_name: redis-sentinel-2
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- /mnt/epdc/redis/sentinel/conf/sentinel1.conf:/usr/local/etc/redis/sentinel.conf
- /mnt/epdc/redis/sentinel/data:/data
- /mnt/epdc/redis/sentinel/log:/usr/local/redis/sentinel/log
- /mnt/epdc/redis/sentinel/dir:/usr/local/redis/sentinel
networks:
epdc_network:
ipv4_address: 172.20.0.12
networks:
epdc_network:
external: true

73
esua-epdc/docker-compose/jinshui/node03/5-seata/conf/registry.conf

@ -0,0 +1,73 @@
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = "0"
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}

25
esua-epdc/docker-compose/prod/master/db/10.5.34.164/docker-compose.yml → esua-epdc/docker-compose/jinshui/node03/5-seata/docker-compose.yml

@@ -21,15 +21,20 @@
version: "3.7"
services:
seata-server:
seata-server2:
container_name: seata-server2
image: seataio/seata-server:latest
hostname: seata-server
ports:
- 9101:8091
environment:
- SEATA_PORT=8091
- SEATA_IP=10.5.34.164
- STORE_MODE=db
- SERVER_NODE=1
expose:
- 8091
SEATA_IP: 172.20.0.21
SEATA_PORT: 8091
STORE_MODE: db
SERVER_NODE: 2
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /mnt/epdc/seata/seata-config:/root/seata-config
networks:
epdc_network:
ipv4_address: 172.20.0.21
networks:
epdc_network:
external: true

9
esua-epdc/docker-compose/jinshui/node03/Readme.md

@ -0,0 +1,9 @@
1. 创建网络:
```
docker network create -d bridge --subnet 172.20.0.0/24 epdc_network
```
2. 执行1-mysql中的docker-compose.yml
3. 执行2-nacos中的docker-compose.yml
https://github.com/alibaba/nacos/blob/master/distribution/conf/nacos-mysql.sql

36
esua-epdc/docker-compose/jinshui/node04/1-mysql/conf/mysql.conf.cnf

@ -0,0 +1,36 @@
[client]
default-character-set=utf8
[mysqld]
character-set-server=utf8
##### 这两部操作可以解决mysql连接很慢的问题 #####
# 根据官方文档说明,如果你的mysql主机查询DNS很慢或是有很多客户端主机时会导致连接很慢,由于我们的开发机器是不能够连接外网的,
# 所以DNS解析是不可能完成的,从而也就明白了为什么连接那么慢了。同时,请注意在增加该配置参数后,mysql的授权表中的host字段就
# 不能够使用域名而只能够使用 ip地址了,因为这是禁止了域名解析的结果。
# 1.禁止域名解析
skip-host-cache
# 2.禁用dns解析,但是,这样不能在mysql的授权表中使用主机名了,只能使用IP。
skip-name-resolve
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分
server-id = 11
# mysql日志
log_bin = /var/lib/mysql/master-bin.log
log-bin-index=master-bin.index
# binlog日志格式,mysql默认采用statement,建议使用 mixed(是statement和row模式的结合)
binlog_format = mixed
#单个日志文件最大
max_binlog_size = 512M
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION

22
esua-epdc/docker-compose/jinshui/node04/1-mysql/docker-compose.yml

@ -0,0 +1,22 @@
version: "3.7"
services:
mysql-master:
container_name: mysql-master
image: mysql:5.7
environment:
TZ: Asia/Shanghai
MYSQL_ROOT_PASSWORD: epdc!elink1405
MYSQL_LOWER_CASE_TABLE_NAMES: 1
volumes:
- /etc/localtime:/etc/localtime
- /etc/timezone:/etc/timezone
- /mnt/epdc/mysql/data:/var/lib/mysql
- /mnt/epdc/mysql/conf/mysql.conf.cnf:/etc/mysql/conf.d/mysql.conf.cnf
restart: always
networks:
epdc_network:
ipv4_address: 172.19.0.2
networks:
epdc_network:
external: true

50
esua-epdc/docker-compose/jinshui/node04/2-nacos/docker-compose.yml

@ -0,0 +1,50 @@
version: "3.7"
services:
nacos1:
image: nacos/nacos-server:latest
container_name: nacos1
networks:
epdc_network:
ipv4_address: 172.19.0.3
volumes:
- /mnt/epdc/nacos/logs/nacos1:/home/nacos/logs
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_IP: 172.19.0.3 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点
MYSQL_MASTER_SERVICE_PORT: 3306
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos
MYSQL_MASTER_SERVICE_USER: nacos
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3
MYSQL_SLAVE_SERVICE_PORT: 3306
restart: on-failure
nacos2:
image: nacos/nacos-server:latest
container_name: nacos2
networks:
epdc_network:
ipv4_address: 172.19.0.4
volumes:
- /mnt/epdc/nacos/logs/nacos2:/home/nacos/logs
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_IP: 172.19.0.4 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点
MYSQL_MASTER_SERVICE_PORT: 3306
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos
MYSQL_MASTER_SERVICE_USER: nacos
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3
MYSQL_SLAVE_SERVICE_PORT: 3306
restart: always
networks:
epdc_network:
external: true

73
esua-epdc/docker-compose/jinshui/node04/3-seata/conf/registry.conf

@ -0,0 +1,73 @@
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = "0"
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}

25
esua-epdc/docker-compose/prod/master/db/10.5.34.166/docker-compose.yml → esua-epdc/docker-compose/jinshui/node04/3-seata/docker-compose.yml

@@ -21,15 +21,20 @@
version: "3.7"
services:
seata-server:
seata-server1:
container_name: seata-server1
image: seataio/seata-server:latest
hostname: seata-server
ports:
- 9101:8091
environment:
- SEATA_PORT=8091
- SEATA_IP=10.5.34.166
- STORE_MODE=db
- SERVER_NODE=1
expose:
- 8091
SEATA_IP: 172.19.0.21
SEATA_PORT: 8091
STORE_MODE: db
SERVER_NODE: 1
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /mnt/epdc/seata/seata-config:/root/seata-config
networks:
epdc_network:
ipv4_address: 172.19.0.21
networks:
epdc_network:
external: true

81
esua-epdc/docker-compose/jinshui/node04/3-seata/script/config.txt

@ -0,0 +1,81 @@
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enable-client-batch-send-request=false
transport.thread-factory.boss-thread-prefix=NettyBoss
transport.thread-factory.worker-thread-prefix=NettyServerNIOWorker
transport.thread-factory.server-executor-thread-prefix=NettyServerBizHandler
transport.thread-factory.share-boss-worker=false
transport.thread-factory.client-selector-thread-prefix=NettyClientSelector
transport.thread-factory.client-selector-thread-size=1
transport.thread-factory.client-worker-thread-prefix=NettyClientWorkerThread
transport.thread-factory.boss-thread-size=1
transport.thread-factory.worker-thread-size=8
transport.shutdown.wait=3
service.vgroup_mapping.my_test_tx_group=default
service.vgroup_mapping.epdc-api-server-fescar-service-group=default
service.vgroup_mapping.epdc-demo-server-fescar-service-group=default
service.vgroup_mapping.epdc-user-server-fescar-service-group=default
service.vgroup_mapping.epdc-services-server-fescar-service-group=default
service.vgroup_mapping.epdc-party-server-fescar-service-group=default
service.vgroup_mapping.epdc-heart-server-fescar-service-group=default
service.vgroup_mapping.epdc-neighbor-server-fescar-service-group=default
service.vgroup_mapping.epdc-oss-server-fescar-service-group=default
service.vgroup_mapping.epdc-message-server-fescar-service-group=default
service.vgroup_mapping.epdc-news-server-fescar-service-group=default
service.vgroup_mapping.epdc-job-server-fescar-service-group=default
service.vgroup_mapping.epdc-admin-server-fescar-service-group=default
service.vgroup_mapping.epdc-activiti-server-fescar-service-group=default
service.vgroup_mapping.epdc-kpi-server-fescar-service-group=default
service.vgroup_mapping.epdc-points-server-fescar-service-group=default
service.vgroup_mapping.epdc-webservice-server-fescar-service-group=default
service.vgroup_mapping.epdc-events-server-fescar-service-group=default
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.async.commit.buffer.limit=10000
client.rm.lock.retry.internal=10
client.rm.lock.retry.times=30
client.rm.report.retry.count=5
client.rm.lock.retry.policy.branch-rollback-on-conflict=true
client.rm.table.meta.check.enable=false
client.rm.report.success.enable=true
client.tm.commit.retry.count=5
client.tm.rollback.retry.count=5
store.mode=db
store.file.dir=file_store/data
store.file.max-branch-session-size=16384
store.file.max-global-session-size=512
store.file.file-write-buffer-cache-size=16384
store.file.flush-disk-mode=async
store.file.session.reload.read_size=100
store.db.datasource=dbcp
store.db.db-type=mysql
store.db.driver-class-name=com.mysql.jdbc.Driver
store.db.url=jdbc:mysql://172.19.0.2:3306/seata?useUnicode=true
store.db.user=epdc
store.db.password=Elink@833066
store.db.min-conn=1
store.db.max-conn=3
store.db.global.table=global_table
store.db.branch.table=branch_table
store.db.query-limit=100
store.db.lock-table=lock_table
server.recovery.committing-retry-period=1000
server.recovery.asyn-committing-retry-period=1000
server.recovery.rollbacking-retry-period=1000
server.recovery.timeout-retry-period=1000
server.max.commit.retry.timeout=-1
server.max.rollback.retry.timeout=-1
server.rollback.retry.timeout.unlock.enable=false
client.undo.data.validation=true
client.undo.log.serialization=jackson
server.undo.log.save.days=7
server.undo.log.delete.period=86400000
client.undo.log.table=undo_log
client.log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registry-type=compact
metrics.exporter-list=prometheus
metrics.exporter-prometheus-port=9898

89
esua-epdc/docker-compose/jinshui/node04/3-seata/script/nacos-config.sh

@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Copyright 1999-2019 Seata.io Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Publish every key=value pair from ../config.txt to the Nacos configuration
# center so seata-server can pull its configuration from Nacos.
# Usage: nacos-config.sh [-h host] [-p port] [-g group] [-t tenant]

while getopts ":h:p:g:t:" opt
do
  case $opt in
  h)
    host=$OPTARG
    ;;
  p)
    port=$OPTARG
    ;;
  g)
    group=$OPTARG
    ;;
  t)
    tenant=$OPTARG
    ;;
  ?)
    # echo -e is required so the ANSI color escapes are rendered instead of
    # being printed literally by the bash builtin echo.
    echo -e "\033[31m USAGE OPTION: $0 [-h host] [-p port] [-g group] [-t tenant] \033[0m"
    exit 1
    ;;
  esac
done

# Defaults: local standalone Nacos, the conventional Seata group,
# and the public (empty) namespace.
if [[ -z ${host} ]]; then
  host=localhost
fi
if [[ -z ${port} ]]; then
  port=8848
fi
if [[ -z ${group} ]]; then
  group="SEATA_GROUP"
fi
if [[ -z ${tenant} ]]; then
  tenant=""
fi

nacosAddr=$host:$port
contentType="content-type:application/json;charset=UTF-8"

echo "set nacosAddr=$nacosAddr"
echo "set group=$group"

failCount=0
tempLog=$(mktemp -u)

# addConfig CONTENT_TYPE NACOS_ADDR KEY VALUE
# Publishes a single config entry via the Nacos Open API and reports the result.
function addConfig() {
  curl -X POST -H "${1}" "http://$2/nacos/v1/cs/configs?dataId=$3&group=$group&content=$4&tenant=$tenant" >"${tempLog}" 2>/dev/null
  if [[ -z $(cat "${tempLog}") ]]; then
    # An empty response body means the server could not be reached at all.
    echo -e "\033[31m Please check the cluster status. \033[0m"
    exit 1
  fi
  if [[ $(cat "${tempLog}") =~ "true" ]]; then
    echo -e "Set $3=$4\033[32m successfully \033[0m"
  else
    echo -e "Set $3=$4\033[31m failure \033[0m"
    (( failCount++ ))
  fi
}

count=0
# Read config.txt line by line. The previous `for line in $(cat ...)` split on
# every whitespace character, corrupting any value that contains a space. The
# input redirection (instead of a pipe) keeps the loop in the current shell so
# the count/failCount updates are not lost in a subshell; the `|| [[ -n ... ]]`
# clause still processes a final line that lacks a trailing newline.
while IFS= read -r line || [[ -n ${line} ]]; do
  [[ -z ${line} ]] && continue   # skip blank lines, matching the old behavior
  (( count++ ))
  key=${line%%=*}
  value=${line#*=}
  addConfig "${contentType}" "${nacosAddr}" "${key}" "${value}"
done < "$(dirname "$PWD")/config.txt"

echo "========================================================================="
echo -e " Complete initialization parameters, \033[32m total-count:$count \033[0m, \033[31m failure-count:$failCount \033[0m"
echo "========================================================================="

if [[ ${failCount} -eq 0 ]]; then
  echo -e "\033[32m Init nacos config finished, please start seata-server. \033[0m"
else
  echo -e "\033[31m init nacos config fail. \033[0m"
fi

29
esua-epdc/docker-compose/jinshui/node04/4-redis/docker-compose.yml

@ -0,0 +1,29 @@
version: '3.7'
services:
master:
image: redis
container_name: redis-master
command: redis-server --requirepass epdc!redis@master1405 --logfile /data/log/redis-master.log
restart: always
volumes:
- /mnt/epdc/redis/log:/data/log
- /mnt/epdc/redis/data:/data
networks:
epdc_network:
ipv4_address: 172.19.0.11
slave1:
image: redis
container_name: redis-slave-1
command: redis-server --slaveof 172.19.0.11 6379 --requirepass epdc!redis@slave1405 --masterauth epdc!redis@master1405 --logfile /data/log/redis-slave1.log
restart: always
volumes:
- /mnt/epdc/redis/log:/data/log
- /mnt/epdc/redis/data:/data
networks:
epdc_network:
ipv4_address: 172.19.0.12
networks:
epdc_network:
external: true

9
esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/conf/sentinel1.conf

@ -0,0 +1,9 @@
port 26379
logfile "/usr/local/redis/sentinel/log/sentinel.log"
dir "/usr/local/redis/sentinel"
sentinel monitor epdcmaster 172.19.0.11 6379 2
sentinel down-after-milliseconds epdcmaster 30000
sentinel parallel-syncs epdcmaster 1
sentinel failover-timeout epdcmaster 180000
sentinel deny-scripts-reconfig yes
sentinel auth-pass epdcmaster epdc!redis@master1405

9
esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/conf/sentinel2.conf

@ -0,0 +1,9 @@
port 26379
logfile "/usr/local/redis/sentinel/log/sentinel.log"
dir "/usr/local/redis/sentinel"
sentinel monitor epdcmaster 172.19.0.11 6379 2
sentinel down-after-milliseconds epdcmaster 30000
sentinel parallel-syncs epdcmaster 1
sentinel failover-timeout epdcmaster 180000
sentinel deny-scripts-reconfig yes
sentinel auth-pass epdcmaster epdc!redis@master1405

31
esua-epdc/docker-compose/jinshui/node04/4-redis/sentinel/docker-compose.yml

@ -0,0 +1,31 @@
version: '3.7'
services:
sentinel1:
image: redis
container_name: redis-sentinel-1
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- /mnt/epdc/redis/sentinel/conf/sentinel1.conf:/usr/local/etc/redis/sentinel.conf
- /mnt/epdc/redis/sentinel/data:/data
- /mnt/epdc/redis/sentinel/log:/usr/local/redis/sentinel/log
- /mnt/epdc/redis/sentinel/dir:/usr/local/redis/sentinel
networks:
epdc_network:
ipv4_address: 172.19.0.13
sentinel3:
image: redis
container_name: redis-sentinel-3
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- /mnt/epdc/redis/sentinel/conf/sentinel2.conf:/usr/local/etc/redis/sentinel.conf
- /mnt/epdc/redis/sentinel/data2:/data
- /mnt/epdc/redis/sentinel/log2:/usr/local/redis/sentinel/log
- /mnt/epdc/redis/sentinel/dir2:/usr/local/redis/sentinel
networks:
epdc_network:
ipv4_address: 172.19.0.14
networks:
epdc_network:
external: true

8
esua-epdc/docker-compose/jinshui/node04/Readme.md

@ -0,0 +1,8 @@
1. 创建网络:
```
docker network create -d bridge --subnet 172.19.0.0/24 epdc_network
```
2. 执行1-mysql中的docker-compose.yml
3. 执行2-nacos中的docker-compose.yml

39
esua-epdc/docker-compose/jinshui/node04/fastdfs/Dockerfile

@ -0,0 +1,39 @@
# centos 7
FROM centos:7
# 添加配置文件
ADD conf/client.conf /etc/fdfs/
ADD conf/http.conf /etc/fdfs/
ADD conf/mime.types /etc/fdfs/
ADD conf/storage.conf /etc/fdfs/
ADD conf/tracker.conf /etc/fdfs/
ADD fastdfs.sh /home
ADD conf/nginx.conf /etc/fdfs/
ADD conf/mod_fastdfs.conf /etc/fdfs
# run
RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl-devel wget vim -y \
&& cd /usr/local/src \
&& git clone https://github.com/happyfish100/libfastcommon.git --depth 1 \
&& git clone https://github.com/happyfish100/fastdfs.git --depth 1 \
&& git clone https://github.com/happyfish100/fastdfs-nginx-module.git --depth 1 \
&& wget http://nginx.org/download/nginx-1.15.4.tar.gz \
&& tar -zxvf nginx-1.15.4.tar.gz \
&& mkdir /home/dfs \
&& cd /usr/local/src/ \
&& cd libfastcommon/ \
&& ./make.sh && ./make.sh install \
&& cd ../ \
&& cd fastdfs/ \
&& ./make.sh && ./make.sh install \
&& cd ../ \
&& cd nginx-1.15.4/ \
&& ./configure --add-module=/usr/local/src/fastdfs-nginx-module/src/ \
&& make && make install \
&& chmod +x /home/fastdfs.sh
# export config
VOLUME /etc/fdfs
EXPOSE 22122 23000 8888 80
ENTRYPOINT ["/home/fastdfs.sh"]

45
esua-epdc/docker-compose/jinshui/node04/fastdfs/README.md

@ -0,0 +1,45 @@
# FastDFS Dockerfile network (网络版本)
## 声明
其实并没什么区别 教程是在上一位huayanYu(小锅盖)和 Wiki的作者 的基础上进行了一些修改,本质上还是huayanYu(小锅盖) 和 Wiki 上的作者写的教程
## 目录介绍
### conf
Dockerfile 所需要的一些配置文件
当然你也可以对这些文件进行一些修改 比如 storage.conf 里面的 base_path 等相关
## 使用方法
需要注意的是 你需要在运行容器的时候指定宿主机的ip 用参数 FASTDFS_IPADDR 来指定
```
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast 镜像id/镜像名称
```
## 后记
本质上 local 版本与 network 版本无区别
## Statement
In fact, there is no difference between the tutorials written by Huayan Yu and Wiki on the basis of their previous authors. In essence, they are also tutorials written by the authors of Huayan Yu and Wiki.
## Catalogue introduction
### conf
Dockerfile Some configuration files needed
Of course, you can also make some modifications to these files, such as base_path in storage.conf, etc.
## Usage method
Note that you need to specify the host IP when running the container with the parameter FASTDFS_IPADDR
Here's a sample docker run instruction
```
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast 镜像id/镜像名称
```
## Epilogue
Essentially, there is no difference between the local version and the network version.

63
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/client.conf

@ -0,0 +1,63 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=30
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store log files
base_path=/home/dfs
# tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=172.19.0.30:22122
tracker_server=172.20.0.30:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port=80
#use "#include" directive to include HTTP other settiongs
##include http.conf

29
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/http.conf

@ -0,0 +1,29 @@
# HTTP default content type
http.default_content_type = application/octet-stream
# MIME types mapping filename
# MIME types file format: MIME_type extensions
# such as: image/jpeg jpeg jpg jpe
# you can use apache's MIME file: mime.types
http.mime_types_filename=mime.types
# if use token to anti-steal
# default value is false (0)
http.anti_steal.check_token=false
# token TTL (time to live), seconds
# default value is 600
http.anti_steal.token_ttl=900
# secret key to generate anti-steal token
# this parameter must be set when http.anti_steal.check_token set to true
# the length of the secret key should not exceed 128 bytes
http.anti_steal.secret_key=FastDFS1234567890
# return the content of the file when check token fail
# default value is empty (no file sepecified)
http.anti_steal.token_check_fail=/home/yuqing/fastdfs/conf/anti-steal.jpg
# if support multi regions for HTTP Range
# default value is true
http.multi_range.enabed = true

1065
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/mime.types

File diff suppressed because it is too large

134
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/mod_fastdfs.conf

@ -0,0 +1,134 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=2
# network recv and send timeout in seconds
# default value is 30s
network_timeout=30
# the base path to store log files
base_path=/tmp
# if load FastDFS parameters from tracker server
# since V1.12
# default value is false
load_fdfs_parameters_from_tracker=true
# storage sync file max delay seconds
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.12
# default value is 86400 seconds (one day)
storage_sync_file_max_delay = 86400
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V1.13
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.13
storage_ids_filename = storage_ids.conf
# FastDFS tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
# valid only when load_fdfs_parameters_from_tracker is true
tracker_server=172.19.0.30:22122
tracker_server=172.20.0.30:22122
# the port of the local storage server
# the default value is 23000
storage_server_port=23000
# the group name of the local storage server
group_name=group1
# if the url / uri including the group name
# set to false when uri like /M00/00/00/xxx
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx
# default value is false
url_have_group_name = true
# path(disk or mount point) count, default value is 1
# must same as storage.conf
store_path_count=1
# store_path#, based 0, if store_path0 not exists, it's value is base_path
# the paths must be exist
# must same as storage.conf
store_path0=/home/dfs
#store_path1=/home/yuqing/fastdfs1
# standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log
# empty for output to stderr (apache and nginx error_log file)
log_filename=
# response mode when the file not exist in the local file system
## proxy: get the content from other storage server, then send to client
## redirect: redirect to the original storage server (HTTP Header is Location)
response_mode=proxy
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# this paramter used to get all ip address of the local host
# default values is empty
if_alias_prefix=
# use "#include" directive to include HTTP config file
# NOTE: #include is an include directive, do NOT remove the # before include
#include http.conf
# if support flv
# default value is false
# since v1.15
flv_support = true
# flv file extension name
# default value is flv
# since v1.15
flv_extension = flv
# set the group count
# set to none zero to support multi-group on this storage server
# set to 0 for single group only
# groups settings section as [group1], [group2], ..., [groupN]
# default value is 0
# since v1.14
group_count = 0
# group settings for group #1
# since v1.14
# when support multi-group on this storage server, uncomment following section
#[group1]
#group_name=group1
#storage_server_port=23000
#store_path_count=2
#store_path0=/home/yuqing/fastdfs
#store_path1=/home/yuqing/fastdfs1
# group settings for group #2
# since v1.14
# when support multi-group, uncomment following section as neccessary
#[group2]
#group_name=group2
#storage_server_port=23000
#store_path_count=1
#store_path0=/home/yuqing/fastdfs

127
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/nginx.conf

@ -0,0 +1,127 @@
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
server {
listen 8888;
server_name localhost;
location ~/group[0-9]/ {
ngx_fastdfs_module;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}

287
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/storage.conf

@ -0,0 +1,287 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# the name of the group this storage server belongs to
#
# comment or remove this item for fetching from tracker server,
# in this case, use_storage_id must set to true in tracker.conf,
# and storage_ids.conf must be configed correctly.
group_name=epdcFile
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# if bind an address of this host when connect to other servers
# (this storage server as a client)
# true for binding the address configed by above parameter: "bind_addr"
# false for binding any address of this host
client_bind=true
# the storage server port
port=23000
# connect timeout in seconds
# default value is 30s
connect_timeout=10
# network timeout in seconds
# default value is 30s
network_timeout=60
# heart beat interval in seconds
heart_beat_interval=30
# disk usage report interval in seconds
stat_report_interval=60
# the base path to store data and log files
base_path=/home/dfs
# max concurrent connections the server supported
# default value is 256
# more max_connections means more memory will be used
# you should set this parameter larger, eg. 10240
max_connections=1024
# the buff size to recv / send data
# this parameter must more than 8KB
# default value is 64KB
# since V2.00
buff_size = 256KB
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# work thread deal network io
# default value is 4
# since V2.00
work_threads=4
# if disk read / write separated
## false for mixed read and write
## true for separated read and write
# default value is true
# since V2.00
disk_rw_separated = true
# disk reader thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_reader_threads = 1
# disk writer thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_writer_threads = 1
# when no entry to sync, try read binlog again after X milliseconds
# must > 0, default value is 200ms
sync_wait_msec=50
# after sync a file, usleep milliseconds
# 0 for sync successively (never call usleep)
sync_interval=0
# storage sync start time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_start_time=00:00
# storage sync end time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_end_time=23:59
# write to the mark file after sync N files
# default value is 500
write_mark_file_freq=500
# path(disk or mount point) count, default value is 1
store_path_count=1
# store_path#, based 0, if store_path0 not exists, it's value is base_path
# the paths must be exist
store_path0=/home/dfs
#store_path1=/home/dfs2
# subdir_count * subdir_count directories will be auto created under each
# store_path (disk), value can be 1 to 256, default value is 256
subdir_count_per_path=256
# tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=172.19.0.30:22122
tracker_server=172.20.0.30:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can ocur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# the mode of the files distributed to the data path
# 0: round robin(default)
# 1: random, distributted by hash code
file_distribute_path_mode=0
# valid when file_distribute_to_path is set to 0 (round robin),
# when the written file count reaches this number, then rotate to next path
# default value is 100
file_distribute_rotate_count=100
# call fsync to disk when write big file
# 0: never call fsync
# other: call fsync when written bytes >= this bytes
# default value is 0 (never call fsync)
fsync_after_written_bytes=0
# sync log buff to disk every interval seconds
# must > 0, default value is 10 seconds
sync_log_buff_interval=10
# sync binlog buff / cache to disk every interval seconds
# default value is 60 seconds
sync_binlog_buff_interval=10
# sync storage stat info to disk every interval seconds
# default value is 300 seconds
sync_stat_file_interval=300
# thread stack size, should >= 512KB
# default value is 512KB
thread_stack_size=512KB
# the priority as a source server for uploading file.
# the lower this value, the higher its uploading priority.
# default value is 10
upload_priority=10
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# default values is empty
if_alias_prefix=
# if check file duplicate, when set to true, use FastDHT to store file indexes
# 1 or yes: need check
# 0 or no: do not check
# default value is 0
check_file_duplicate=0
# file signature method for check file duplicate
## hash: four 32 bits hash code
## md5: MD5 signature
# default value is hash
# since V4.01
file_signature_method=hash
# namespace for storing file indexes (key-value pairs)
# this item must be set when check_file_duplicate is true / on
key_namespace=FastDFS
# set keep_alive to 1 to enable persistent connection with FastDHT servers
# default value is 0 (short connection)
keep_alive=0
# you can use "#include filename" (not include double quotes) directive to
# load FastDHT server list, when the filename is a relative path such as
# pure filename, the base path is the base path of current/this config file.
# must set FastDHT server list when check_file_duplicate is true / on
# please see INSTALL of FastDHT for detail
##include /home/yuqing/fastdht/conf/fdht_servers.conf
# if log to access log
# default value is false
# since V4.00
use_access_log = false
# if rotate the access log every day
# default value is false
# since V4.00
rotate_access_log = false
# rotate access log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.00
access_log_rotate_time=00:00
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate access log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_access_log_size = 0
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if skip the invalid record when sync file
# default value is false
# since V4.02
file_sync_skip_invalid_record=false
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# use the ip address of this storage server if domain_name is empty,
# else this domain name will ocur in the url redirected by the tracker server
http.domain_name=
# the port of the web server on this storage server
http.server_port=8888

278
esua-epdc/docker-compose/jinshui/node04/fastdfs/conf/tracker.conf

@ -0,0 +1,278 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# the tracker server port
port=22122
# connect timeout in seconds
# default value is 30s
connect_timeout=10
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store data and log files
base_path=/home/dfs
# max concurrent connections this server supported
# you should set this parameter larger, eg. 102400
max_connections=1024
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# default value is 4
# since V2.00
work_threads=4
# min buff size
# default value 8KB
min_buff_size = 8KB
# max buff size
# default value 128KB
max_buff_size = 128KB
# the method of selecting group to upload files
# 0: round robin
# 1: specify group
# 2: load balance, select the max free space group to upload file
store_lookup=2
# which group to upload file
# when store_lookup set to 1, must set store_group to the group name
store_group=group2
# which storage server to upload file
# 0: round robin (default)
# 1: the first server order by ip address
# 2: the first server order by priority (the minimal)
# Note: if use_trunk_file set to true, must set store_server to 1 or 2
store_server=0
# which path(means disk or mount point) of the storage server to upload file
# 0: round robin
# 2: load balance, select the max free space path to upload file
store_path=0
# which storage server to download file
# 0: round robin (default)
# 1: the source storage server which the current file uploaded to
download_server=0
# reserved storage space for system or other applications.
# if the free(available) space of any stoarge server in
# a group <= reserved_storage_space,
# no file can be uploaded to this group.
# bytes unit can be one of follows:
### G or g for gigabyte(GB)
### M or m for megabyte(MB)
### K or k for kilobyte(KB)
### no unit for byte(B)
### XX.XX% as ratio such as reserved_storage_space = 10%
reserved_storage_space = 1%
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can ocur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# sync log buff to disk every interval seconds
# default value is 10 seconds
sync_log_buff_interval = 10
# check storage server alive interval seconds
check_active_interval = 120
# thread stack size, should >= 64KB
# default value is 64KB
thread_stack_size = 64KB
# auto adjust when the ip address of the storage server changed
# default value is true
storage_ip_changed_auto_adjust = true
# storage sync file max delay seconds
# default value is 86400 seconds (one day)
# since V2.00
storage_sync_file_max_delay = 86400
# the max time of storage sync a file
# default value is 300 seconds
# since V2.00
storage_sync_file_max_time = 300
# if use a trunk file to store several small files
# default value is false
# since V3.00
use_trunk_file = false
# the min slot size, should <= 4KB
# default value is 256 bytes
# since V3.00
slot_min_size = 256
# the max slot size, should > slot_min_size
# store the upload file to trunk file when it's size <= this value
# default value is 16MB
# since V3.00
slot_max_size = 16MB
# the trunk file size, should >= 4MB
# default value is 64MB
# since V3.00
trunk_file_size = 64MB
# if create trunk file advancely
# default value is false
# since V3.06
trunk_create_file_advance = false
# the time base to create trunk file
# the time format: HH:MM
# default value is 02:00
# since V3.06
trunk_create_file_time_base = 02:00
# the interval of create trunk file, unit: second
# default value is 38400 (one day)
# since V3.06
trunk_create_file_interval = 86400
# the threshold to create trunk file
# when the free trunk file size less than the threshold, will create
# the trunk files
# default value is 0
# since V3.06
trunk_create_file_space_threshold = 20G
# if check trunk space occupying when loading trunk free spaces
# the occupied spaces will be ignored
# default value is false
# since V3.09
# NOTICE: set this parameter to true will slow the loading of trunk spaces
# when startup. you should set this parameter to true when neccessary.
trunk_init_check_occupying = false
# if ignore storage_trunk.dat, reload from trunk binlog
# default value is false
# since V3.10
# set to true once for version upgrade when your version less than V3.10
trunk_init_reload_from_binlog = false
# the min interval for compressing the trunk binlog file
# unit: second
# default value is 0, 0 means never compress
# FastDFS compress the trunk binlog when trunk init and trunk destroy
# recommand to set this parameter to 86400 (one day)
# since V5.01
trunk_compress_binlog_min_interval = 0
# if use storage ID instead of IP address
# default value is false
# since V4.00
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# since V4.00
storage_ids_filename = storage_ids.conf
# id type of the storage server in the filename, values are:
## ip: the ip address of the storage server
## id: the server id of the storage server
# this paramter is valid only when use_storage_id set to true
# default value is ip
# since V4.03
id_type_in_filename = ip
# if store slave file use symbol link
# default value is false
# since V4.01
store_slave_file_use_link = false
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# HTTP port on this tracker server
http.server_port=8080
# check storage HTTP server alive interval seconds
# <= 0 for never check
# default value is 30
http.check_alive_interval=30
# check storage HTTP server alive type, values are:
# tcp : connect to the storge server with HTTP port only,
# do not request and get response
# http: storage check alive url must return http status 200
# default value is tcp
http.check_alive_type=tcp
# check storage HTTP server alive uri/url
# NOTE: storage embed HTTP server support uri: /status.html
http.check_alive_uri=/status.html

26
esua-epdc/docker-compose/jinshui/node04/fastdfs/fastdfs.sh

@ -0,0 +1,26 @@
#!/bin/bash
# Container entrypoint for the FastDFS image: rewrite the placeholder host in
# the FastDFS config files with the real address, start tracker + storage +
# nginx, then block so the container stays alive.

# Real address is injected through the container environment.
new_val=$FASTDFS_IPADDR
# Placeholder hostname baked into the image's bundled config files.
old="com.ikingtech.ch116221"
sed -i "s/$old/$new_val/g" /etc/fdfs/client.conf
sed -i "s/$old/$new_val/g" /etc/fdfs/storage.conf
sed -i "s/$old/$new_val/g" /etc/fdfs/mod_fastdfs.conf
# Snapshot the substituted configs as .txt copies for debugging/inspection.
cat /etc/fdfs/client.conf > /etc/fdfs/client.txt
cat /etc/fdfs/storage.conf > /etc/fdfs/storage.txt
cat /etc/fdfs/mod_fastdfs.conf > /etc/fdfs/mod_fastdfs.txt
# Swap in the FastDFS-specific nginx.conf, keeping the original as .t backup.
mv /usr/local/nginx/conf/nginx.conf /usr/local/nginx/conf/nginx.conf.t
cp /etc/fdfs/nginx.conf /usr/local/nginx/conf
echo "start trackerd"
/etc/init.d/fdfs_trackerd start
echo "start storage"
/etc/init.d/fdfs_storaged start
echo "start nginx"
/usr/local/nginx/sbin/nginx
# Keep PID 1 alive; the services above daemonize themselves.
tail -f /dev/null

47
esua-epdc/docker-compose/jinshui/node04/fastdfs/nginx/docker-compose.yml

@ -0,0 +1,47 @@
version: '3.7'
services:
fastdfs-tracker:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data
command: tracker
networks:
epdc_network:
ipv4_address: 172.19.0.30
fastdfs-storage:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path
environment:
TRACKER_SERVER: 172.19.0.30:22122
command: storage
networks:
epdc_network:
ipv4_address: 172.19.0.31
fastdfs-nginx:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path
environment:
TRACKER_SERVER: 172.19.0.30:22122
command: nginx
networks:
epdc_network:
ipv4_address: 172.19.0.32
networks:
epdc_network:
external: true

47
esua-epdc/docker-compose/jinshui/node04/fastdfs/storage/docker-compose.yml

@ -0,0 +1,47 @@
version: '3.7'
services:
fastdfs-tracker:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data
command: tracker
networks:
epdc_network:
ipv4_address: 172.19.0.30
fastdfs-storage:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path
environment:
TRACKER_SERVER: 172.19.0.30:22122
command: storage
networks:
epdc_network:
ipv4_address: 172.19.0.31
fastdfs-nginx:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path
environment:
TRACKER_SERVER: 172.19.0.30:22122
command: nginx
networks:
epdc_network:
ipv4_address: 172.19.0.32
networks:
epdc_network:
external: true

47
esua-epdc/docker-compose/jinshui/node04/fastdfs/tracker/docker-compose.yml

@ -0,0 +1,47 @@
version: '3.7'
services:
fastdfs-tracker:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data
command: tracker
networks:
epdc_network:
ipv4_address: 172.19.0.30
fastdfs-storage:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path
environment:
TRACKER_SERVER: 172.19.0.30:22122
command: storage
networks:
epdc_network:
ipv4_address: 172.19.0.31
fastdfs-nginx:
image: season/fastdfs
restart: always
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path
environment:
TRACKER_SERVER: 172.19.0.30:22122
command: nginx
networks:
epdc_network:
ipv4_address: 172.19.0.32
networks:
epdc_network:
external: true

14
esua-epdc/docker-compose/prod/master/application/10.5.34.162-master/docker-compose.yml

@ -1,14 +0,0 @@
version: '3.7'
services:
web:
image: nginx
ports:
- 443:443
volumes:
- /mnt/nginx/html:/usr/share/nginx/html
- /mnt/nginx/conf/nginx.conf:/etc/nginx/nginx.conf
- /mnt/nginx/conf.d:/etc/nginx/conf.d
- /mnt/nginx/logs:/var/log/nginx
restart: always
container_name: nginx_master

50
esua-epdc/docker-compose/prod/master/picture/10.5.34.166/docker-compose.yml

@ -1,50 +0,0 @@
version: '3.7'
services:
epdc-events-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-events-server:prod
ports:
- "9066:9066"
epdc-gateway:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-gateway:prod
ports:
- "9094:9094"
epdc-auth:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-auth:prod
ports:
- "9056:9056"
epdc-admin-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-admin-server:prod
ports:
- "9055:9055"
epdc-oss-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-oss-server:prod
ports:
- "9065:9065"
epdc-api-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-api-server:prod
ports:
- "9040:9040"
epdc-news-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-news-server:prod
ports:
- "9064:9064"
epdc-user-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-user-server:prod
ports:
- "9068:9068"
epdc-websocket-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-websocket-server:prod
ports:
- "9988:9988"
epdc-kpi-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-kpi-server:prod
ports:
- "9987:9987"
epdc-group-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-group-server:prod
ports:
- "9063:9063"
epdc-message-server:
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-message-server:prod
ports:
- "9062:9062"

25
esua-epdc/docker-compose/prod/node01/3-nacos/docker-compose.yml

@ -0,0 +1,25 @@
version: "3.7"
services:
nacos1:
image: nacos/nacos-server:latest
container_name: nacos1
ports:
- 10001:10001
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /mnt/epdc/nacos/logs/nacos1:/home/nacos/logs
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_PORT: 10001
NACOS_SERVER_IP: 172.16.0.53 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.16.0.53:10001 172.16.0.51:10001 172.16.0.51:10002 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_SERVICE_HOST: 172.16.0.52 #mysql配置,Master为主节点,Slave为从节点
MYSQL_SERVICE_PORT: 3306
MYSQL_SERVICE_DB_NAME: esua_epdc_nacos
MYSQL_SERVICE_USER: epdc
MYSQL_SERVICE_PASSWORD: Elink@833066
MYSQL_DATABASE_NUM: 2
restart: always

73
esua-epdc/docker-compose/prod/node01/4-seata/conf/registry.conf

@ -0,0 +1,73 @@
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = "0"
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3
type = "nacos"
nacos {
serverAddr = "47.104.208.104:80"
namespace = ""
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}

39
esua-epdc/docker-compose/prod/node01/4-seata/docker-compose.yml

@ -0,0 +1,39 @@
#环境变量
#seata-server 支持以下环境变量:
#
#SEATA_IP
#可选, 指定seata-server启动的IP, 该IP用于向注册中心注册时使用, 如eureka等
#
#SEATA_PORT
#可选, 指定seata-server启动的端口, 默认为 8091
#
#STORE_MODE
#可选, 指定seata-server的事务日志存储方式, 支持db 和 file, 默认是 file
#
#SERVER_NODE
#可选, 用于指定seata-server节点ID, 如 1,2,3..., 默认为 1
#
#SEATA_ENV
#可选, 指定 seata-server 运行环境, 如 dev, test 等, 服务启动时会使用 registry-dev.conf 这样的配置
#
#SEATA_CONFIG_NAME
#可选, 指定配置文件位置, 如 file:/root/registry, 将会加载 /root/registry.conf 作为配置文件
version: "3.7"
services:
seata-server1:
container_name: seata-server1
image: seataio/seata-server:latest
ports:
- 9608:9608
environment:
SEATA_IP: 47.104.85.99
SEATA_PORT: 9608
STORE_MODE: db
SERVER_NODE: 1
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/seata/seata-config:/root/seata-config
- /opt/epdc/seata/logs:/root/logs/seata

84
esua-epdc/docker-compose/prod/node01/4-seata/script/config.txt

@ -0,0 +1,84 @@
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableClientBatchSendRequest=false
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThread-size=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=8
transport.shutdown.wait=3
service.vgroup_mapping.my_test_tx_group=default
service.vgroup_mapping.epdc-api-server-fescar-service-group=default
service.vgroup_mapping.epdc-demo-server-fescar-service-group=default
service.vgroup_mapping.epdc-user-server-fescar-service-group=default
service.vgroup_mapping.epdc-services-server-fescar-service-group=default
service.vgroup_mapping.epdc-party-server-fescar-service-group=default
service.vgroup_mapping.epdc-heart-server-fescar-service-group=default
service.vgroup_mapping.epdc-neighbor-server-fescar-service-group=default
service.vgroup_mapping.epdc-oss-server-fescar-service-group=default
service.vgroup_mapping.epdc-message-server-fescar-service-group=default
service.vgroup_mapping.epdc-news-server-fescar-service-group=default
service.vgroup_mapping.epdc-job-server-fescar-service-group=default
service.vgroup_mapping.epdc-admin-server-fescar-service-group=default
service.vgroup_mapping.epdc-activiti-server-fescar-service-group=default
service.vgroup_mapping.epdc-kpi-server-fescar-service-group=default
service.vgroup_mapping.epdc-points-server-fescar-service-group=default
service.vgroup_mapping.epdc-webservice-server-fescar-service-group=default
service.vgroup_mapping.epdc-events-server-fescar-service-group=default
service.vgroup_mapping.epdc-custom-server-fescar-service-group=default
service.vgroup_mapping.epdc-analysis-server-fescar-service-group=default
service.vgroup_mapping.epdc-group-server-fescar-service-group=default
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.async.commit.buffer.limit=10000
client.rm.lock.retry.internal=10
client.rm.lock.retry.times=30
client.rm.report.retry.count=5
client.rm.lock.retry.policy.branch-rollback-on-conflict=true
client.rm.table.meta.check.enable=false
client.rm.report.success.enable=true
client.tm.commit.retry.count=5
client.tm.rollback.retry.count=5
store.mode=db
store.file.dir=file_store/data
store.file.maxBranchSessionSize=16384
store.file.maxGlobalSessionSize=512
store.file.fileWriteBufferCacheSize=16384
store.file.flushDiskMode=async
store.file.session.reload.read_size=100
store.db.datasource=dbcp
store.db.dbType=mysql
store.db.driverClassName=com.mysql.jdbc.Driver
store.db.url=jdbc:mysql://172.31.171.61:9600/epdc_seata?useUnicode=true
store.db.user=seata
store.db.password=elink888
store.db.minConn=1
store.db.maxConn=3
store.db.global.table=global_table
store.db.branch.table=branch_table
store.db.queryLimit=100
store.db.lockTable=lock_table
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.max.commit.retry.timeout=-1
server.max.rollback.retry.timeout=-1
server.rollback.retry.timeout.unlock.enable=false
client.undo.data.validation=true
client.undo.log.serialization=jackson
server.undo.log.save.days=7
server.undo.log.delete.period=86400000
client.undo.log.table=undo_log
client.log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898

89
esua-epdc/docker-compose/prod/node01/4-seata/script/nacos-config.sh

@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Copyright 1999-2019 Seata.io Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Push every key=value pair from config.txt into the Nacos config center so
# seata-server can read its settings from Nacos.
# Usage: nacos-config.sh [-h host] [-p port] [-g group] [-t tenant]
while getopts ":h:p:g:t:" opt
do
  case $opt in
  h)
    host=$OPTARG
    ;;
  p)
    port=$OPTARG
    ;;
  g)
    group=$OPTARG
    ;;
  t)
    tenant=$OPTARG
    ;;
  ?)
    # -e is required: bash's builtin echo does not interpret \033 color
    # escapes by default, so without it the raw codes are printed literally.
    echo -e "\033[31m USAGE OPTION: $0 [-h host] [-p port] [-g group] [-t tenant] \033[0m"
    exit 1
    ;;
  esac
done

# Defaults when options are omitted.
if [[ -z ${host} ]]; then
  host=localhost
fi
if [[ -z ${port} ]]; then
  port=8848
fi
if [[ -z ${group} ]]; then
  group="SEATA_GROUP"
fi
if [[ -z ${tenant} ]]; then
  tenant=""
fi

nacosAddr=$host:$port
contentType="content-type:application/json;charset=UTF-8"

echo "set nacosAddr=$nacosAddr"
echo "set group=$group"

failCount=0
tempLog=$(mktemp -u)

# addConfig <contentType> <nacosAddr> <key> <value>
# POSTs one config entry to the Nacos v1 config API; counts failures.
function addConfig() {
  curl -X POST -H "${1}" "http://$2/nacos/v1/cs/configs?dataId=$3&group=$group&content=$4&tenant=$tenant" >"${tempLog}" 2>/dev/null
  # An empty response body means Nacos itself is unreachable — abort.
  if [[ -z $(cat "${tempLog}") ]]; then
    echo -e "\033[31m Please check the cluster status. \033[0m"
    exit 1
  fi
  if [[ $(cat "${tempLog}") =~ "true" ]]; then
    echo -e "Set $3=$4\033[32m successfully \033[0m"
  else
    echo -e "Set $3=$4\033[31m failure \033[0m"
    (( failCount++ ))
  fi
}

count=0
# NOTE(review): config.txt is resolved in the PARENT of the current working
# directory, but in this repo it sits in the same script/ directory —
# confirm the intended invocation cwd before running.
for line in $(cat $(dirname "$PWD")/config.txt); do
  (( count++ ))
  key=${line%%=*}
  value=${line#*=}
  addConfig "${contentType}" "${nacosAddr}" "${key}" "${value}"
done

echo "========================================================================="
echo -e " Complete initialization parameters, \033[32m total-count:$count \033[0m, \033[31m failure-count:$failCount \033[0m"
echo "========================================================================="

if [[ ${failCount} -eq 0 ]]; then
  echo -e "\033[32m Init nacos config finished, please start seata-server. \033[0m"
else
  echo -e "\033[31m init nacos config fail. \033[0m"
fi

48
esua-epdc/docker-compose/prod/node02/2-nacos/docker-compose.yml

@ -0,0 +1,48 @@
version: "3.7"
services:
nacos2:
image: nacos/nacos-server:latest
container_name: nacos2
ports:
- 10001:10001
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nacos/logs/nacos2:/home/nacos/logs
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_PORT: 9602
NACOS_SERVER_IP: 172.31.171.61 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.31.171.61:9601 172.31.171.61:9602 172.31.171.62:9601 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_SERVICE_HOST: 172.31.171.61 #mysql配置,Master为主节点,Slave为从节点
MYSQL_SERVICE_PORT: 9600
MYSQL_SERVICE_DB_NAME: epdc_nacos
MYSQL_SERVICE_USER: nacos
MYSQL_SERVICE_PASSWORD: elink888
MYSQL_DATABASE_NUM: 2
restart: always
nacos3:
image: nacos/nacos-server:latest
container_name: nacos3
ports:
- 9601:9601
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nacos/logs:/home/nacos/logs
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty
NACOS_SERVER_PORT: 9601
NACOS_SERVER_IP: 172.31.171.62 #多网卡情况下,指定ip或网卡
NACOS_SERVERS: 172.31.171.61:9601 172.31.171.61:9602 172.31.171.62:9601 #集群中其它节点[ip1:port ip2:port ip3:port]
MYSQL_SERVICE_HOST: 172.31.171.61 #mysql配置,Master为主节点,Slave为从节点
MYSQL_SERVICE_PORT: 9600
MYSQL_SERVICE_DB_NAME: epdc_nacos
MYSQL_SERVICE_USER: nacos
MYSQL_SERVICE_PASSWORD: elink888
MYSQL_DATABASE_NUM: 2
restart: always

73
esua-epdc/docker-compose/prod/node02/5-seata/conf/registry.conf

@ -0,0 +1,73 @@
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
serverAddr = "172.31.171.61:80"
namespace = ""
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = "0"
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3
type = "nacos"
nacos {
serverAddr = "172.31.171.61:80"
namespace = ""
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}

21
esua-epdc/docker-compose/prod/master/application/10.5.34.163-slave/docker-compose.yml → esua-epdc/docker-compose/prod/node02/5-seata/docker-compose.yml

@ -21,15 +21,18 @@
version: "3.7"
services:
seata-server:
seata-server2:
container_name: seata-server2
image: seataio/seata-server:latest
hostname: seata-server
ports:
- 9001:8091
- 9608:9608
environment:
- SEATA_PORT=8091
- SEATA_IP=
- STORE_MODE=db
- SERVER_NODE=1
expose:
- 8091
SEATA_IP: 114.215.125.123
SEATA_PORT: 9608
STORE_MODE: db
SERVER_NODE: 2
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/seata/seata-config:/root/seata-config

39
esua-epdc/docker-compose/test/node01/1-mysql/conf/mysql.conf.cnf

@ -18,28 +18,53 @@ skip-host-cache
skip-name-resolve
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分
server-id = 21
server-id = 2
# mysql日志
log_bin = /var/lib/mysql/mysql-bin.log
log-bin-index=slave-relay-bin.index
log-bin-index=/var/lib/mysql/slave-relay-bin.index
#日志记录的格式
binlog_format=MIXED
binlog_format=mixed
#单个日志文件最大
max_binlog_size = 100M 
max_binlog_size=100M 
#从库建议开启,有利于数据一致性
relay_log_recovery = 1   
relay_log_recovery=1   
#如果从库还会用做主库,建议开启
log_slave_updates = 1   
log_slave_updates=1   
# 中继日志:存储所有主库TP过来的binlog事件主库binlog:记录主库发生过的修改事件
# relay-log = /var/lib/mysql/mysql-relay-bin.log
relay-log=/var/lib/mysql/mysql-relay-bin.log
relay-log-index=relay-log.index
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
# 慢查询
slow_query_log=on
long_query_time=3
slow_query_log_file=/var/lib/mysql/mysql-slow.log
# GTID
gtid_mode=on
enforce_gtid_consistency=on
# 此参数设置为ON时,新创建的InnoDB 数据表的数据会存储于一个单独的文件中,而不是存储于共享的ibdata表空间。
innodb-file-per-table=1
innodb_flush_log_at_trx_commit=2
log_warnings=1
# 只读配置
read_only=1
[mysqld_safe]
log-error=/var/lib/mysql/mysqld.log
pid-file=/var/lib/mysql/mysqld.pid
INSERT INTO esua_epdc_events.epdc_events VALUES ('78e89fe1d35170ed3b159a5abdf6eaf0','a897d6b6d8b97bfaaafbb031d9a3d7a2','郭口路3号-马女士','https://wx.qlogo.cn/mmopen/vi_32/q5bEiaAeUgBDj5VbESXLicsic9CSpOeqCGCQLBWXic4lLCf5ayqib5e3DRAeNBc7zCzRMxFyuE5ZhwbyYtuJyTHQZdg/132','1','13156396355','春江水暖鸭先知','青啤社区第四网格',1222016013611073538,'山东省青岛市市北区丹山路88号',36.0762400000,120.3402690000,4,'生机勃勃',0,0,0,0,'0',0,'APP_USER','2020-04-09 15:41:56','1222040755663568897','2020-04-09 18:35:45','0','','','','1175270520603930625,1202809196967714817,1202809398139117570','市北区党委-登州路街道-青啤社区','13','13','其它','1175270520603930625,1202809196967714817,1202809398139117570,1222016013611073538','市北区党委-登州路街道-青啤社区-青啤社区第四网格');

1
esua-epdc/docker-compose/test/node01/2-nginx/docker-compose.yml

@ -7,6 +7,7 @@ services:
- 443:443
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nginx/html:/usr/share/nginx/html
- /opt/epdc/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/epdc/nginx/conf.d:/etc/nginx/conf.d:ro

1
esua-epdc/docker-compose/test/node01/3-nacos/docker-compose.yml

@ -7,6 +7,7 @@ services:
- 9601:9601
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nacos/logs:/home/nacos/logs
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:

3
esua-epdc/docker-compose/test/node01/4-redis/docker-compose.yml

@ -9,6 +9,7 @@ services:
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-slave01.conf:/usr/local/etc/redis/redis.conf
@ -22,6 +23,7 @@ services:
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-slave02.conf:/usr/local/etc/redis/redis.conf
@ -35,6 +37,7 @@ services:
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-slave03.conf:/usr/local/etc/redis/redis.conf

1
esua-epdc/docker-compose/test/node01/5-seata/docker-compose.yml

@ -34,4 +34,5 @@ services:
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/seata/seata-config:/root/seata-config

35
esua-epdc/docker-compose/test/node02/1-mysql/conf/mysql.conf.cnf

@ -18,19 +18,44 @@ skip-host-cache
skip-name-resolve
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分
server-id = 11
server-id=1
#如果从库还会用做主库,建议开启
log_slave_updates=1
# mysql日志
log_bin = /var/lib/mysql/master-bin.log
log-bin-index=master-bin.index
log_bin=/var/lib/mysql/master-bin.log
log-bin-index=/var/lib/mysql/master-bin.index
# binlog日志格式,mysql默认采用statement,建议使用 mixed(是statement和row模式的结合)
binlog_format = mixed
binlog_format=mixed
# 中继日志:存储所有主库TP过来的binlog事件主库binlog:记录主库发生过的修改事件
relay-log=/var/lib/mysql/relay-log
relay-log-index=/var/lib/mysql/relay-log.index
#单个日志文件最大
max_binlog_size = 100M
max_binlog_size=100M
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
# 慢查询
slow_query_log=on
long_query_time=1
slow_query_log_file=/data/mysql/mysql-slow.log
# GTID
gtid_mode=on
enforce_gtid_consistency=on
# 此参数设置为ON时,新创建的InnoDB 数据表的数据会存储于一个单独的文件中,而不是存储于共享的ibdata表空间。
innodb-file-per-table=1
innodb_flush_log_at_trx_commit=2
log_warnings=1
[mysqld_safe]
log-error=/var/lib/mysql/mysqld.log
pid-file=/var/lib/mysql/mysqld.pid

3
esua-epdc/docker-compose/test/node02/1-mysql/docker-compose.yml

@ -15,3 +15,6 @@ services:
- /opt/epdc/mysql/data:/var/lib/mysql
- /opt/epdc/mysql/conf/mysql.conf.cnf:/etc/mysql/conf.d/mysql.conf.cnf
restart: always
mysql -e "show databases;" -uroot -p| grep -Ev "Database|information_schema|mysql|performance_schema" |xargs mysqldump -uroot -p --single-transaction --master-data=2 --no-autocommit --databases --default-character-set=utf8 > mysql_dump.sql

4
esua-epdc/docker-compose/test/node02/2-nacos/docker-compose.yml

@ -6,6 +6,8 @@ services:
ports:
- 9601:9601
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nacos/logs/nacos1:/home/nacos/logs
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:
@ -28,6 +30,8 @@ services:
ports:
- 9602:9602
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/nacos/logs/nacos2:/home/nacos/logs
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
environment:

2
esua-epdc/docker-compose/test/node02/3-seata/docker-compose.yml

@ -33,5 +33,7 @@ services:
SERVER_NODE: 1
SEATA_CONFIG_NAME: file:/root/seata-config/registry
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/seata/seata-config:/root/seata-config
- /opt/epdc/seata/logs:/root/logs/seata

6
esua-epdc/docker-compose/test/node02/4-redis/docker-compose.yml

@ -8,6 +8,8 @@ services:
command: redis-server /usr/local/etc/redis/redis.conf
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-master01.conf:/usr/local/etc/redis/redis.conf
@ -20,6 +22,8 @@ services:
command: redis-server /usr/local/etc/redis/redis.conf
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-master02.conf:/usr/local/etc/redis/redis.conf
@ -32,6 +36,8 @@ services:
command: redis-server /usr/local/etc/redis/redis.conf
restart: always
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /opt/epdc/redis/log:/data/log
- /opt/epdc/redis/data:/data
- /opt/epdc/redis/conf/redis-master03.conf:/usr/local/etc/redis/redis.conf

4
esua-epdc/epdc-admin/epdc-admin-client/src/main/java/com/elink/esua/epdc/dto/AppMenuDTO.java

@ -132,4 +132,8 @@ public class AppMenuDTO extends TreeNode<SysMenuDTO> implements Serializable {
* 上级菜单名称
*/
private String parentName;
/**
* 绑定菜单id
*/
private Long bindingId;
}

61
esua-epdc/epdc-admin/epdc-admin-client/src/main/java/com/elink/esua/epdc/dto/DeptGridDTO.java

@ -0,0 +1,61 @@
/**
* Copyright (c) 2018 人人开源 All rights reserved.
* <p>
* https://www.renren.io
* <p>
* 版权所有侵权必究
*/
package com.elink.esua.epdc.dto;
import com.elink.esua.epdc.commons.tools.utils.TreeNode;
import com.elink.esua.epdc.commons.tools.validator.group.AddGroup;
import com.elink.esua.epdc.commons.tools.validator.group.DefaultGroup;
import com.elink.esua.epdc.commons.tools.validator.group.UpdateGroup;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Null;
import java.io.Serializable;
import java.util.Date;
/**
 * Department/grid DTO: a flat view of one department node carrying its own
 * name plus its parent's id and name, used when listing grid-level
 * departments for the current user.
 *
 * @author Mark sunlightcs@gmail.com
 * @since 1.0.0
 */
@Data
public class DeptGridDTO implements Serializable {
    private static final long serialVersionUID = 1L;
    /**
     * Department id
     */
    private Long id;
    /**
     * Parent department id
     */
    private Long pid;
    /**
     * Department name
     */
    private String name;
    /**
     * Parent department name
     */
    private String parentName;
    /**
     * Full department name — presumably the concatenated hierarchy path; confirm against the mapper that populates it
     */
    private String deptName;
}

5
esua-epdc/epdc-admin/epdc-admin-client/src/main/java/com/elink/esua/epdc/dto/SysAnalysisMenuDTO.java

@ -137,5 +137,10 @@ public class SysAnalysisMenuDTO implements Serializable {
*/
private String modelname;
/**
* 绑定菜单id
*/
private Long bindingId;
}

2
esua-epdc/epdc-admin/epdc-admin-server/Dockerfile

@ -1,5 +1,5 @@
# 基础镜像
FROM openjdk:8u242-jdk-buster
FROM openjdk:8
# 作者
MAINTAINER rongchao@elink-cn.com
# 对应pom.xml文件中的dockerfile-maven-plugin插件JAR_FILE的值

14
esua-epdc/epdc-admin/epdc-admin-server/pom.xml

@ -148,7 +148,7 @@
<nacos.register-enabled>false</nacos.register-enabled>
<nacos.server-addr>47.104.224.45:8848</nacos.server-addr>
<nacos.ip></nacos.ip>
<nacos.namespace>6a3577b4-7b79-43f6-aebb-9c3f31263f6a</nacos.namespace>
<nacos.namespace>a746dde3-7a13-4521-b986-7369b0b7c269</nacos.namespace>
<spring.zipkin.base-url>http://localhost:9411</spring.zipkin.base-url>
<!--小程序配置-->
@ -158,8 +158,8 @@
<work.wx.ma.appId>wx29b074840ef4bfd9</work.wx.ma.appId>
<work.wx.ma.secret>4adb1afccc69f205cdf5b521d74e2aca</work.wx.ma.secret>
<!--数据分析端小程序-->
<analysis.wx.ma.appId>wx9b6102a8ee5add65</analysis.wx.ma.appId>
<analysis.wx.ma.secret>394f47d4e08fc0fd629231d3f68a34dc</analysis.wx.ma.secret>
<analysis.wx.ma.appId>wxfa4afaa2b5f9c876</analysis.wx.ma.appId>
<analysis.wx.ma.secret>7db9f049c78c9a6cafa673deebe8330d</analysis.wx.ma.secret>
<!--RocketMQ-->
<rocketmq.name.server>47.104.85.99:9876;114.215.125.123:9876</rocketmq.name.server>
<rocketmq.producer.group>organizationGroup</rocketmq.producer.group>
@ -191,8 +191,8 @@
<work.wx.ma.appId>wx9f20a46906ab2c3e</work.wx.ma.appId>
<work.wx.ma.secret>dc13065f79429979d9f687d249eb5c4e</work.wx.ma.secret>
<!--数据分析端小程序-->
<analysis.wx.ma.appId>wx9b6102a8ee5add65</analysis.wx.ma.appId>
<analysis.wx.ma.secret>394f47d4e08fc0fd629231d3f68a34dc</analysis.wx.ma.secret>
<analysis.wx.ma.appId>wxfa4afaa2b5f9c876</analysis.wx.ma.appId>
<analysis.wx.ma.secret>7db9f049c78c9a6cafa673deebe8330d</analysis.wx.ma.secret>
<!--RocketMQ-->
<rocketmq.name.server>47.104.85.99:9876;114.215.125.123:9876</rocketmq.name.server>
<rocketmq.producer.group>organizationGroup</rocketmq.producer.group>
@ -230,8 +230,8 @@
<work.wx.ma.appId>wx9f20a46906ab2c3e</work.wx.ma.appId>
<work.wx.ma.secret>dc13065f79429979d9f687d249eb5c4e</work.wx.ma.secret>
<!--数据分析端小程序-->
<analysis.wx.ma.appId>wx9b6102a8ee5add65</analysis.wx.ma.appId>
<analysis.wx.ma.secret>394f47d4e08fc0fd629231d3f68a34dc</analysis.wx.ma.secret>
<analysis.wx.ma.appId>wxfa4afaa2b5f9c876</analysis.wx.ma.appId>
<analysis.wx.ma.secret>7db9f049c78c9a6cafa673deebe8330d</analysis.wx.ma.secret>
<spring.datasource.druid.url>
<![CDATA[jdbc:mysql://172.16.0.52:3306/esua_epdc_admin?allowMultiQueries=true&useUnicode=true&characterEncoding=UTF-8&useSSL=false&serverTimezone=Asia/Shanghai]]>

25
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/controller/SysUserController.java

@ -231,6 +231,12 @@ public class SysUserController {
return new Result<List<DataScopeDeptList>>().ok(data);
}
/**
 * Look up user ids via {@code sysRoleUserService.getWorkRecordRoleUser} for
 * the given menu code — presumably the holders of the work-record role bound
 * to that menu (semantics live in the service/mapper; confirm there).
 *
 * @param menuCode menu code taken from the request path
 * @return user id list wrapped in a {@code Result} envelope
 */
@GetMapping("getWorkRecordRoleUser/{menuCode}")
public Result<List<Long>> getWorkRecordRoleUser(@PathVariable("menuCode") String menuCode) {
    return new Result<List<Long>>().ok(sysRoleUserService.getWorkRecordRoleUser(menuCode));
}
/**
* 根据用户名更新用户微信openId
@ -327,6 +333,25 @@ public class SysUserController {
DeptOption deptOption = sysDeptService.getUserDeptOption();
return new Result().ok(deptOption);
}
/**
 * Return the current user's department hierarchy for front-end display
 * (request must carry a token).
 * <p>Reads the data cached in redis by
 * {@link SysUserController#packageUserDeptOption(Long)}.</p>
 * <p>Exposes a three-level hierarchy (street - community - grid),
 * excluding the district level.</p>
 *
 * @return com.elink.esua.epdc.commons.tools.utils.Result&lt;com.elink.esua.epdc.dto.DeptOption&gt;
 * @author work@yujt.net.cn
 * @date 2019/11/27 14:31
 */
@GetMapping("deptOptions/getDeptAuthByUser")
public Result<DeptOption> getDeptAuthByUser() {
    DeptOption deptOption = sysDeptService.getDeptAuthByUser();
    // Parameterize Result to match the declared return type instead of the
    // raw `new Result()`, which compiles with an unchecked-conversion warning.
    return new Result<DeptOption>().ok(deptOption);
}
/**
 * Return the grid-level departments the current user is authorized for,
 * as produced by {@code sysDeptService.getGridAuthByUser()}.
 *
 * @return list of {@link DeptGridDTO} wrapped in a {@code Result} envelope
 */
@GetMapping("deptOptions/getGridAuthByUser")
public Result<List<DeptGridDTO>> getGridAuthByUser() {
    List<DeptGridDTO> grids = sysDeptService.getGridAuthByUser();
    // Parameterize Result to match the declared return type instead of the
    // raw `new Result()`, which compiles with an unchecked-conversion warning.
    return new Result<List<DeptGridDTO>>().ok(grids);
}
/**
* 获取用户部门多层结构完整层级结构包括顶级部门用户前端显示请求需携带token

8
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/dao/SysDeptDao.java

@ -10,10 +10,7 @@ package com.elink.esua.epdc.dao;
import com.elink.esua.epdc.commons.mybatis.dao.BaseDao;
import com.elink.esua.epdc.commons.tools.utils.Result;
import com.elink.esua.epdc.dto.CompleteDeptDTO;
import com.elink.esua.epdc.dto.DeptTreeDTO;
import com.elink.esua.epdc.dto.SysDeptDTO;
import com.elink.esua.epdc.dto.SysSimpleDeptDTO;
import com.elink.esua.epdc.dto.*;
import com.elink.esua.epdc.dto.epdc.GridForLeaderRegisterDTO;
import com.elink.esua.epdc.dto.epdc.form.UserSysDeptInfoFormDTO;
import com.elink.esua.epdc.dto.epdc.result.UserSysDeptInfoResultDTO;
@ -130,6 +127,9 @@ public interface SysDeptDao extends BaseDao<SysDeptEntity> {
*/
List<SysDeptEntity> selectChildrenDeptNode(@Param("dataScopeDeptList") List<Long> dataScopeDeptList, @Param("parentDeptIdList") List<Long> parentDeptIdList);
/**
 * Selects, from the user's scoped departments, the rows used to build the
 * authorised department tree (per the mapper: scoped departments whose parent
 * is not itself in scope).
 *
 * @param dataScopeDeptList department ids the user is scoped to
 * @return matching department entities
 */
List<SysDeptEntity> selectChildrenDeptAuth(@Param("dataScopeDeptList") List<Long> dataScopeDeptList);

/**
 * Queries grid-level departments (type_key = 'grid_party') among the given
 * ids, including each grid's parent name and a "parent/name" display name.
 *
 * @param deptList candidate department ids
 * @return grid DTOs ordered by parent id
 */
List<DeptGridDTO> getGridAuthByUser(@Param("deptList") List<Long> deptList);
/**
* @param formDTO
* @return java.util.List<com.elink.esua.epdc.dto.epdc.result.UserSysDeptInfoResultDTO>

2
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/dao/SysRoleUserDao.java

@ -43,4 +43,6 @@ public interface SysRoleUserDao extends BaseDao<SysRoleUserEntity> {
* @return
*/
List<Long> getRoleIdList(Long userId);
/**
 * Finds the ids of users holding any role bound to the app menu with the given
 * code (see mapper: sys_app_menu -> sys_app_role_menu -> sys_role_user).
 *
 * @param menuCode app menu code (sys_app_menu.menu_code)
 * @return distinct user ids
 */
List<Long> getWorkRecordRoleUser(String menuCode);
}

1
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/entity/SysAnalysisMenuEntity.java

@ -117,4 +117,5 @@ public class SysAnalysisMenuEntity extends BaseEntity {
*/
private String numFlag;
}

4
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/SysDeptService.java

@ -163,6 +163,10 @@ public interface SysDeptService extends BaseService<SysDeptEntity> {
*/
DeptOption getUserDeptOption();
/**
 * Returns the current user's authorised department tree (street - community -
 * grid), read from a redis cache built on first access.
 */
DeptOption getDeptAuthByUser();

/**
 * Returns the grid-level departments (with parent names) the current user is
 * authorised for; may be null/empty when the user has no department scope.
 */
List<DeptGridDTO> getGridAuthByUser();
/**
* @param formDTO
* @return java.util.List<com.elink.esua.epdc.dto.epdc.result.UserSysDeptInfoResultDTO>

6
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/SysRoleUserService.java

@ -46,4 +46,10 @@ public interface SysRoleUserService extends BaseService<SysRoleUserEntity> {
* @param userId 用户ID
*/
List<Long> getRoleIdList(Long userId);
/**
 * Finds the ids of users holding any role bound to the app menu with the
 * given code.
 *
 * @param menuCode app menu code (the original comment labelled this a user id
 *                 by mistake)
 * @return user id list
 */
List<Long> getWorkRecordRoleUser(String menuCode);
}

146
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/impl/SysDeptServiceImpl.java

@ -350,8 +350,41 @@ public class SysDeptServiceImpl extends BaseServiceImpl<SysDeptDao, SysDeptEntit
public DeptOption getUserDeptOption() {
return getUserDeptOption(SecurityUser.getUserId());
}
/**
 * Returns the cached street-community-grid department tree for the user
 * currently logged in.
 */
@Override
public DeptOption getDeptAuthByUser() {
    Long currentUserId = SecurityUser.getUserId();
    return getDeptAuthByUser(currentUserId);
}
/**
 * Returns the grid-level departments the current user is authorised for.
 *
 * @return grid DTOs for the user's scoped departments; an empty list when the
 *         user has no department scope (previously returned null - empty is
 *         returned instead so callers need no null check)
 */
@Override
public List<DeptGridDTO> getGridAuthByUser() {
    // All department ids the current user is scoped to.
    List<Long> deptList = SecurityUser.getUser().getDeptIdList();
    if (CollUtil.isEmpty(deptList)) {
        // Empty collection instead of null (avoids NPEs in callers).
        return Lists.newArrayList();
    }
    return baseDao.getGridAuthByUser(deptList);
}
/**
 * Loads the user's street-community-grid department tree from redis, building
 * and caching it first when absent.
 *
 * @param userId user id
 * @return com.elink.esua.epdc.dto.DeptOption (null when nothing could be built)
 * @author work@yujt.net.cn
 * @date 2020/2/11 11:18
 */
private DeptOption getDeptAuthByUser(Long userId) {
    String cacheKey = RedisKeys.getAdminUserDeptAuthOptionKey(userId);
    Object cached = redisUtils.get(cacheKey);
    if (null == cached) {
        // Cache miss: assemble the tree, then read it back.
        this.packageDeptAuthByUser(userId);
        cached = redisUtils.get(cacheKey);
    }
    return (DeptOption) cached;
}
/**
* 根据userId从redis取出用户部门层级关系街道-社区-网格
*
@ -507,6 +540,10 @@ public class SysDeptServiceImpl extends BaseServiceImpl<SysDeptDao, SysDeptEntit
packageDeptOptionByUser(userId, false);
}
/**
 * Builds the user's authorised department tree (street - community - grid)
 * and stores it in redis.
 *
 * @param userId user id
 */
public void packageDeptAuthByUser(Long userId) {
    packageDeptOptionAuthByUser(userId);
}
/**
 * Builds and caches the user's department option tree; the {@code true} flag
 * presumably selects the complete hierarchy including the top level - confirm
 * against packageDeptOptionByUser.
 *
 * @param userId user id
 */
public void packageAllUserDeptOption(Long userId) {
    packageDeptOptionByUser(userId, true);
}
@ -577,6 +614,43 @@ public class SysDeptServiceImpl extends BaseServiceImpl<SysDeptDao, SysDeptEntit
}
}
/**
 * Assembles the user's department hierarchy level by level and caches the
 * resulting DeptOption in redis under the user's dept-auth option key.
 * When the user has no department scope, nothing is cached (the key stays
 * unset and readers see null).
 *
 * @param userId user id
 * @return void
 * @author work@yujt.net.cn
 * @date 2020/1/28 10:49
 */
private void packageDeptOptionAuthByUser(long userId) {
    UserDetail userDetail = userDetailRedis.get(userId);
    // All department ids the user is scoped to.
    List<Long> dataScopeDeptList = userDetail.getDeptIdList();
    if (CollUtil.isEmpty(dataScopeDeptList)) {
        return;
    }
    // Top-most level of the assembled tree. Children are attached in place to
    // these node objects by packageChildrenNodeAuth on later iterations, so
    // holding the first returned array is enough (the original kept a list of
    // every level but only ever used element 0; it also declared an unused
    // local "deptId" - both removed).
    JSONArray topLevelNodes = null;
    JSONArray parentNodeArray = new JSONArray();
    while (CollUtil.isNotEmpty(dataScopeDeptList)) {
        // Assemble the next level of child nodes under the current parents.
        Map<String, Object> object = this.packageChildrenNodeAuth(parentNodeArray, dataScopeDeptList);
        dataScopeDeptList = (List<Long>) object.get("scope");
        parentNodeArray = (JSONArray) object.get("node");
        if (topLevelNodes == null) {
            topLevelNodes = parentNodeArray;
        }
    }
    DeptOption option = new DeptOption();
    option.setOptions(topLevelNodes);
    redisUtils.set(RedisKeys.getAdminUserDeptAuthOptionKey(userId), option);
}
/**
* 组装下级结构节点
@ -643,7 +717,79 @@ public class SysDeptServiceImpl extends BaseServiceImpl<SysDeptDao, SysDeptEntit
result.put("pids", parentDeptIdList);
return result;
}
/**
 * Assembles one level of child nodes under the given parent nodes.
 *
 * @param allParentNode     all parent nodes of the level being built; each
 *                          parent gets a "children" array attached in place
 * @param dataScopeDeptList department ids the user has data permission for
 * @return java.util.Map<java.lang.String, java.lang.Object> with "node" (the
 *         JSONArray of children built this pass) and "scope" (remaining ids
 *         not yet placed in the tree)
 * @author work@yujt.net.cn
 * @date 2019/11/29 10:27
 */
private Map<String, Object> packageChildrenNodeAuth(JSONArray allParentNode, List<Long> dataScopeDeptList) {
    List<SysDeptEntity> childDepts = baseDao.selectChildrenDeptAuth(dataScopeDeptList);
    List<Long> parentDeptIdList = Lists.newArrayList();
    // All child nodes built in this pass.
    JSONArray allChildrenNode = new JSONArray();
    // Children of one particular parent node.
    JSONArray childrenNodeList;
    // A single child node.
    JSONObject nodeChild;
    // A single parent node.
    JSONObject nodeParent;
    Long deptId;
    for (int i = 0; i < allParentNode.size(); i++) {
        nodeParent = allParentNode.getJSONObject(i);
        // Collects the direct children of this parent.
        childrenNodeList = new JSONArray();
        for (int j = 0; j < childDepts.size(); j++) {
            deptId = childDepts.get(j).getId();
            // Match child to parent: the parent's "value" holds its id as a
            // string; compare it against the child's pid.
            if (nodeParent.get("value").equals(String.valueOf(childDepts.get(j).getPid()))) {
                nodeChild = new JSONObject();
                nodeChild.put("label", childDepts.get(j).getName());
                nodeChild.put("value", String.valueOf(deptId));
                childrenNodeList.add(nodeChild);
                allChildrenNode.add(nodeChild);
                parentDeptIdList.add(deptId);
            }
        }
        if (childrenNodeList.size() > 0) {
            nodeParent.put("children", childrenNodeList);
        }
    }
    // First pass: no parents exist yet, so the departments returned by the
    // query become the top-level nodes themselves.
    if(allParentNode.size() == 0){
        for (int index = 0; index < childDepts.size(); index++) {
            JSONObject node = new JSONObject();
            node.put("label", childDepts.get(index).getName());
            node.put("value", String.valueOf(childDepts.get(index).getId()));
            allParentNode.add(node);
            parentDeptIdList.add(childDepts.get(index).getId());
            allChildrenNode.add(node);
        }
    }
    // Keep only the scoped ids not placed at this level; they are candidates
    // for deeper levels on the next pass.
    List<Long> newDataScopeList = Lists.newArrayList();
    for (int i = 0; i < dataScopeDeptList.size(); i++) {
        deptId = dataScopeDeptList.get(i);
        if (!parentDeptIdList.contains(deptId)) {
            newDataScopeList.add(deptId);
        }
    }
    dataScopeDeptList = newDataScopeList;
    Map<String, Object> result = Maps.newHashMap();
    result.put("node", allChildrenNode);
    result.put("scope", dataScopeDeptList);
    return result;
}
@Override
public void packgeAllDeptOption() {
List<DeptTreeDTO> deptList = baseDao.selectListDeptTree();

5
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/impl/SysRoleDataScopeServiceImpl.java

@ -10,10 +10,12 @@ package com.elink.esua.epdc.service.impl;
import cn.hutool.core.collection.CollUtil;
import com.elink.esua.epdc.commons.mybatis.service.impl.BaseServiceImpl;
import com.elink.esua.epdc.commons.tools.redis.RedisUtils;
import com.elink.esua.epdc.dao.SysRoleDataScopeDao;
import com.elink.esua.epdc.dto.DataScopeDeptList;
import com.elink.esua.epdc.entity.SysRoleDataScopeEntity;
import com.elink.esua.epdc.service.SysRoleDataScopeService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
@ -28,6 +30,8 @@ import java.util.List;
@Service
public class SysRoleDataScopeServiceImpl extends BaseServiceImpl<SysRoleDataScopeDao, SysRoleDataScopeEntity>
implements SysRoleDataScopeService {
@Autowired
private RedisUtils redisUtils;
@Override
public List<Long> getDeptIdList(Long roleId) {
@ -64,6 +68,7 @@ public class SysRoleDataScopeServiceImpl extends BaseServiceImpl<SysRoleDataScop
//保存
insert(sysRoleDataScopeEntity);
}
redisUtils.deleteByPattern("epdc:options:dept:user*");
}
@Override

2
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/impl/SysRoleServiceImpl.java

@ -124,7 +124,7 @@ public class SysRoleServiceImpl extends BaseServiceImpl<SysRoleDao, SysRoleEntit
appRoleMenuService.saveOrUpdate(entity.getId(), dto.getAppMenuIdList());
// //更新角色与数据端菜单关系
// sysAnalysisRoleMenuService.saveOrUpdate(entity.getId(), dto.getAnalysisMenuIdList());
sysAnalysisRoleMenuService.saveOrUpdate(entity.getId(), dto.getAnalysisMenuIdList());
}

5
esua-epdc/epdc-admin/epdc-admin-server/src/main/java/com/elink/esua/epdc/service/impl/SysRoleUserServiceImpl.java

@ -62,4 +62,9 @@ public class SysRoleUserServiceImpl extends BaseServiceImpl<SysRoleUserDao, SysR
return baseDao.getRoleIdList(userId);
}
/**
 * Looks up the ids of users whose roles grant the app menu with the given code.
 *
 * @param menuCode app menu code
 * @return user id list from the DAO
 */
@Override
public List<Long> getWorkRecordRoleUser(String menuCode) {
    return baseDao.getWorkRecordRoleUser(menuCode);
}
}

1
esua-epdc/epdc-admin/epdc-admin-server/src/main/resources/application.yml

@ -23,6 +23,7 @@ spring:
server-addr: @nacos.server-addr@
register-enabled: @nacos.register-enabled@
ip: @nacos.ip@
namespace: @nacos.namespace@
alibaba:
seata:
tx-service-group: epdc-admin-server-fescar-service-group

26
esua-epdc/epdc-admin/epdc-admin-server/src/main/resources/mapper/SysDeptDao.xml

@ -156,6 +156,32 @@
GROUP BY tem2.ID
ORDER BY tem2.create_date ASC
</select>
<!-- From the user's scoped departments (dataScopeDeptList), keeps only those
     whose parent is NOT itself in scope, i.e. the top-most rows of the scope.
     Implemented as a self anti-join on sys_dept restricted to the id list on
     both sides.
     NOTE(review): selecting tem1.* with GROUP BY tem1.ID relies on MySQL's
     relaxed only_full_group_by handling - confirm the server sql_mode. -->
<select id="selectChildrenDeptAuth" resultType="com.elink.esua.epdc.entity.SysDeptEntity">
    SELECT
    tem1.*
    FROM
    ( SELECT * FROM sys_dept d1 WHERE d1.id IN <foreach collection="dataScopeDeptList" item="item" open="(" separator="," close=")">#{item}</foreach> ) tem1
    LEFT JOIN ( SELECT * FROM sys_dept d2 WHERE d2.id IN <foreach collection="dataScopeDeptList" item="item" open="(" separator="," close=")">#{item}</foreach> ) tem2
    on tem2.id = tem1.pid
    WHERE tem2.id is null
    GROUP BY tem1.ID
    ORDER BY tem1.create_date ASC
</select>
<!-- Grid-level departments (type_key = 'grid_party') among the given ids,
     each with its parent department's name and a "parent/name" display name. -->
<select id="getGridAuthByUser" resultType="com.elink.esua.epdc.dto.DeptGridDTO">
    SELECT
    d1.id,
    d1.name,
    d1.pid,
    d2.name as parentName,
    concat(d2.name,'/',d1.name) as deptName
    FROM sys_dept d1
    left join sys_dept d2 on d1.pid = d2.id
    WHERE
    d1.id IN <foreach collection="deptList" item="item" open="(" separator="," close=")">#{item}</foreach>
    and d1.type_key = 'grid_party'
    order by d1.pid
</select>
<select id="selectListUserSysDeptInfo" parameterType="com.elink.esua.epdc.dto.epdc.form.UserSysDeptInfoFormDTO"
resultType="com.elink.esua.epdc.dto.epdc.result.UserSysDeptInfoResultDTO">

14
esua-epdc/epdc-admin/epdc-admin-server/src/main/resources/mapper/SysRoleUserDao.xml

@ -17,5 +17,17 @@
<select id="getRoleIdList" resultType="long">
select role_id from sys_role_user where user_id = #{value}
</select>
<!-- Distinct ids of users holding any role bound to the app menu with the
     given menu_code (menu -> role-menu -> role-user).
     NOTE(review): "USER" is used as a table alias; it is a MySQL function name
     and a reserved word in other SQL dialects - consider renaming the alias. -->
<select id="getWorkRecordRoleUser" resultType="long">
    SELECT
    USER.user_id
    FROM
    sys_app_menu menu
    LEFT JOIN sys_app_role_menu rm ON menu.id = rm.menu_id
    LEFT JOIN sys_role_user USER ON rm.role_id = USER.role_id
    WHERE
    USER.user_id IS NOT NULL
    AND menu.menu_code = #{value}
    GROUP BY
    USER.user_id
</select>
</mapper>

2
esua-epdc/epdc-auth/Dockerfile

@ -1,5 +1,5 @@
# 基础镜像
FROM openjdk:8u242-jdk-buster
FROM openjdk:8
# 作者
MAINTAINER rongchao@elink-cn.com
# 对应pom.xml文件中的dockerfile-maven-plugin插件JAR_FILE的值

10
esua-epdc/epdc-auth/src/main/resources/logback-spring.xml

@ -144,10 +144,10 @@
<!-- 生产环境 -->
<springProfile name="prod">
<logger name="org.springframework.web" level="ERROR"/>
<logger name="org.springboot.sample" level="ERROR"/>
<logger name="com.elink.esua.epdc" level="ERROR"/>
<root level="ERROR">
<logger name="org.springframework.web" level="INFO"/>
<logger name="org.springboot.sample" level="INFO"/>
<logger name="com.elink.esua.epdc" level="INFO"/>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="DEBUG_FILE"/>
<appender-ref ref="INFO_FILE"/>
@ -156,4 +156,4 @@
</root>
</springProfile>
</configuration>
</configuration>

6
esua-epdc/epdc-commons/epdc-common-clienttoken/pom.xml

@ -23,6 +23,12 @@
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.esua.epdc</groupId>
<artifactId>epdc-user-client</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>

12
esua-epdc/epdc-commons/epdc-common-clienttoken/src/main/java/com/elink/esua/epdc/common/token/dto/TokenDto.java

@ -1,8 +1,11 @@
package com.elink.esua.epdc.common.token.dto;
import com.elink.esua.epdc.dto.UserTagInfo;
import lombok.Data;
import java.io.Serializable;
import java.util.Date;
import java.util.List;
/**
* 用户token
@ -46,7 +49,12 @@ public class TokenDto implements Serializable {
private Long gridId;
/**
* 党员标识 01
* 性别(女性-0男性-1)
*/
private String partyFlag;
private String sex;
/**
* 用户标签信息列表
*/
private List<UserTagInfo> userTagInfos;
}

20
esua-epdc/epdc-commons/epdc-common-clienttoken/src/main/java/com/elink/esua/epdc/common/token/util/CpUserDetailRedis.java

@ -40,6 +40,26 @@ public class CpUserDetailRedis {
redisUtils.hMSet(key, map, expire);
}
/**
 * Caches the user's token info in redis. Unlike {@link #set(TokenDto, long)},
 * the redis key suffix is supplied by the caller instead of being derived
 * from the user.
 *
 * @param user           user token info; ignored when null
 * @param expire         time-to-live for the cached hash
 * @param redisKeySuffix suffix used to build the redis key
 * @return void
 * @author work@yujt.net.cn
 * @date 2020/1/31 15:13
 */
public void set(TokenDto user, long expire, String redisKeySuffix) {
    if (user == null) {
        // Nothing to cache.
        return;
    }
    String cacheKey = RedisKeys.getCpUserKey(redisKeySuffix);
    // Flatten the bean into a field map and store it as a redis hash.
    Map<String, Object> fieldMap = BeanUtil.beanToMap(user, false, true);
    redisUtils.hMSet(cacheKey, fieldMap, expire);
}
/**
* 获取token信息
*

36
esua-epdc/epdc-commons/epdc-commons-mybatis/src/main/java/com/elink/esua/epdc/commons/mybatis/entity/DeptScope.java

@ -0,0 +1,36 @@
package com.elink.esua.epdc.commons.mybatis.entity;
import lombok.Data;
import java.io.Serializable;
/**
 * Base class for entities carrying denormalized (redundant) department fields.
 *
 * @author rongchao
 * @Date 19-12-18
 */
@Data
public abstract class DeptScope extends BaseEpdcEntity implements Serializable {

    /***
     * All department names.
     */
    private String allDeptNames;

    /***
     * All department ids.
     */
    private String allDeptIds;

    /***
     * Parent department names.
     */
    private String parentDeptNames;

    /***
     * Parent department ids (the original comment wrongly repeated the
     * "names" description here).
     */
    private String parentDeptIds;
}

74
esua-epdc/epdc-commons/epdc-commons-mybatis/src/main/java/com/elink/esua/epdc/commons/mybatis/utils/DeptEntityUtils.java

@ -0,0 +1,74 @@
package com.elink.esua.epdc.commons.mybatis.utils;
import com.elink.esua.epdc.commons.mybatis.entity.DeptScope;
import lombok.Data;
/**
 * Utility for copying denormalized department fields into {@link DeptScope}
 * entities.
 *
 * @author rongchao
 * @Date 19-12-18
 */
public class DeptEntityUtils {

    /** Non-instantiable utility class. */
    private DeptEntityUtils() {
    }

    /**
     * Carrier for the four denormalized department fields.
     */
    @Data
    public static class DeptDto {
        /**
         * Parent department ids.
         */
        private String parentDeptIds;
        /**
         * Parent department names.
         */
        private String parentDeptNames;
        /**
         * All department ids.
         */
        private String allDeptIds;
        /**
         * All department names.
         */
        private String allDeptNames;
    }

    /**
     * Instantiates {@code entityClass} and loads the department fields into it.
     *
     * @param dto         source of the department fields
     * @param entityClass entity type; must have an accessible no-arg constructor
     * @return the populated entity, or null when instantiation fails
     * @author rongchao
     * @since 2019-12-18
     */
    public static <T extends DeptScope> T loadDeptInfo(DeptDto dto, Class<T> entityClass) {
        try {
            // getDeclaredConstructor().newInstance() replaces the deprecated
            // Class.newInstance(), which rethrows checked exceptions unchecked.
            T t = entityClass.getDeclaredConstructor().newInstance();
            // Reuse the two-arg overload so the copy logic lives in one place.
            loadDeptInfo(dto, t);
            return t;
        } catch (ReflectiveOperationException e) {
            // Preserve the original contract: report and return null on failure.
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Loads the department fields from {@code dto} into an existing entity.
     *
     * @param dto    source of the department fields
     * @param entity target entity
     * @return void
     * @author rongchao
     * @since 2019-12-18
     */
    public static <T extends DeptScope> void loadDeptInfo(DeptDto dto, T entity) {
        entity.setAllDeptIds(dto.getAllDeptIds());
        entity.setAllDeptNames(dto.getAllDeptNames());
        entity.setParentDeptIds(dto.getParentDeptIds());
        entity.setParentDeptNames(dto.getParentDeptNames());
    }
}

2
esua-epdc/epdc-commons/epdc-commons-tools-wx-ma/pom.xml

@ -31,4 +31,4 @@
</dependency>
</dependencies>
</project>
</project>

12
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/NacosConfigConstant.java

@ -0,0 +1,12 @@
package com.elink.esua.epdc.commons.tools.constant;
/**
 * Constants for the Nacos configuration centre.
 *
 * @author rongchao
 * @Date 20-1-15
 */
public interface NacosConfigConstant {

    /** Group under which EPDC configuration entries are published in Nacos. */
    String CONFIG_GROUP = "EPDC_CONFIG_GROUP";
}

1
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/NumConstant.java

@ -29,6 +29,7 @@ public interface NumConstant {
int THIRTY = 30;
int FORTY = 40;
int FIFTY = 50;
int SIXTY = 60;
int ONE_HUNDRED = 100;
long ZERO_L = 0L;

13
esua-epdc/epdc-commons/epdc-commons-tools/src/main/java/com/elink/esua/epdc/commons/tools/constant/PointsConstant.java

@ -0,0 +1,13 @@
package com.elink.esua.epdc.commons.tools.constant;
/**
 * Constants used by the points (积分) module.
 *
 * @Auther: yinzuomei
 * @Date: 2019/12/16 19:38
 * @Description: points-related constants
 */
public interface PointsConstant {

    /**
     * Rule code for manually adjusting points.
     * NOTE(review): constant names are conventionally UPPER_SNAKE_CASE
     * (RULE_CODE); left unchanged to avoid breaking existing callers.
     */
    String ruleCode ="hand_regulation";
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save