489 changed files with 25577 additions and 2611 deletions
@ -0,0 +1,3 @@ |
|||
[submodule "esua-epdc/epdc-cloud-gateway-shibei"] |
|||
path = esua-epdc/epdc-cloud-gateway-shibei |
|||
url = http://121.42.41.42:7070/r/epdc-cloud-gateway-shibei.git |
@ -0,0 +1,52 @@ |
|||
--------标记一下这个已经随工作端上线 |
|||
ALTER TABLE esua_epdc_user.epdc_party_authentication_failed MODIFY COLUMN `STATE` VARCHAR(1) NULL COMMENT '状态 0-认证失败'; |
|||
ALTER TABLE esua_epdc_user.epdc_party_authentication_failed MODIFY COLUMN `CADRE_FLAG` varchar(1) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT '0' COMMENT '干部下沉标识 0-否,1-是'; |
|||
ALTER TABLE esua_epdc_user.epdc_user_authenticate_history ADD AUTHENTICATED_TYPE varchar(2) NOT NULL COMMENT '认证类别(0-居民认证,1-党员认证,2-志愿者认证)'; |
|||
------------------------------------------------ |
|||
|
|||
CREATE TABLE `epdc_handle_category` ( |
|||
`ID` varchar(32) NOT NULL COMMENT '主键', |
|||
`CATEGORY_VAL` int(11) NOT NULL COMMENT '处理类别值', |
|||
`CATEGORY_LABEL` varchar(20) NOT NULL COMMENT '处理类别显示信息', |
|||
`AVAILABLE` varchar(1) NOT NULL DEFAULT '1' COMMENT '可用状态(0-不可用,1-可用)', |
|||
`SORT` int(11) NOT NULL DEFAULT '0' COMMENT '排序', |
|||
`REVISION` int(11) DEFAULT NULL COMMENT '乐观锁', |
|||
`CREATED_BY` varchar(32) DEFAULT NULL COMMENT '创建人', |
|||
`CREATED_TIME` datetime DEFAULT NULL COMMENT '创建时间', |
|||
`UPDATED_BY` varchar(32) DEFAULT NULL COMMENT '更新人', |
|||
`UPDATED_TIME` datetime DEFAULT NULL COMMENT '更新时间', |
|||
`DEL_FLAG` varchar(1) NOT NULL COMMENT '删除标识 0:未删除,1:已删除', |
|||
`CATEGORY_CODE` varchar(32) NOT NULL COMMENT '处理类型编码', |
|||
PRIMARY KEY (`ID`) |
|||
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='处理类别表' |
|||
|
|||
|
|||
CREATE TABLE `epdc_role_category` ( |
|||
`ID` varchar(32) NOT NULL COMMENT '主键', |
|||
`ROLE_ID` varchar(32) NOT NULL COMMENT '角色ID', |
|||
`CATEGORY_ID` varchar(32) NOT NULL COMMENT '处理类别ID', |
|||
`REVISION` int(11) DEFAULT NULL COMMENT '乐观锁', |
|||
`CREATED_BY` varchar(32) DEFAULT NULL COMMENT '创建人', |
|||
`CREATED_TIME` datetime DEFAULT NULL COMMENT '创建时间', |
|||
`UPDATED_BY` varchar(32) DEFAULT NULL COMMENT '更新人', |
|||
`UPDATED_TIME` datetime DEFAULT NULL COMMENT '更新时间', |
|||
`DEL_FLAG` varchar(1) NOT NULL COMMENT '删除标识 0:未删除,1:已删除', |
|||
PRIMARY KEY (`ID`) |
|||
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='角色和处理类别关系表' |
|||
|
|||
|
|||
ALTER TABLE EPDC_HANDLE_ROLE_CATEGORY COMMENT '角色和处理类别关系表'; |
|||
CREATE TABLE EPDC_HANDLE_ROLE_DEPT( |
|||
ID VARCHAR(32) NOT NULL COMMENT '主键' , |
|||
ROLE_ID VARCHAR(32) NOT NULL COMMENT '角色ID' , |
|||
DEPT_ID VARCHAR(32) NOT NULL COMMENT '部门ID' , |
|||
DEPT_TYPE VARCHAR(50) NOT NULL COMMENT '部门机构类型' , |
|||
REVISION INT COMMENT '乐观锁' , |
|||
CREATED_BY VARCHAR(32) COMMENT '创建人' , |
|||
CREATED_TIME DATETIME COMMENT '创建时间' , |
|||
UPDATED_BY VARCHAR(32) COMMENT '更新人' , |
|||
UPDATED_TIME DATETIME COMMENT '更新时间' , |
|||
PRIMARY KEY (ID) |
|||
) COMMENT = '处理部门角色权限表'; |
|||
|
|||
ALTER TABLE EPDC_HANDLE_ROLE_DEPT COMMENT '处理部门角色权限表'; |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-admin-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-admin-server:prod |
|||
container_name: epdc-admin-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.41 |
|||
volumes: |
|||
- /mnt/epdc/app/admin/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-api-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-api-server:prod |
|||
container_name: epdc-api-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.42 |
|||
volumes: |
|||
- /mnt/epdc/app/api/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-auth: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-auth:prod |
|||
container_name: epdc-auth-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.43 |
|||
volumes: |
|||
- /mnt/epdc/app/auth/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-events-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-events-server:prod |
|||
container_name: epdc-events-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.44 |
|||
volumes: |
|||
- /mnt/epdc/app/events/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-gateway: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-gateway:prod |
|||
container_name: epdc-gateway-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.40 |
|||
volumes: |
|||
- /mnt/epdc/app/gateway/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-group-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-group-server:prod |
|||
container_name: epdc-group-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.45 |
|||
volumes: |
|||
- /mnt/epdc/app/group/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-message-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-message-server:prod |
|||
container_name: epdc-message-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.46 |
|||
volumes: |
|||
- /mnt/epdc/app/message/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-news-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-news-server:prod |
|||
container_name: epdc-news-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.47 |
|||
volumes: |
|||
- /mnt/epdc/app/news/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-oss-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-oss-server:prod |
|||
container_name: epdc-oss-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.48 |
|||
volumes: |
|||
- /mnt/epdc/app/oss/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-user-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-user-server:prod |
|||
container_name: epdc-user-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.49 |
|||
volumes: |
|||
- /mnt/epdc/app/user/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,14 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-websocket-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-websocket-server:prod |
|||
container_name: epdc-websocket-server-01 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.50 |
|||
volumes: |
|||
- /mnt/epdc/app/websocket/logs:/logs |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,45 @@ |
|||
[client] |
|||
|
|||
default-character-set=utf8 |
|||
|
|||
[mysqld] |
|||
|
|||
character-set-server=utf8 |
|||
|
|||
##### 这两步操作可以解决mysql连接很慢的问题 ##### |
|||
# 根据官方文档说明,如果你的mysql主机查询DNS很慢或是有很多客户端主机时会导致连接很慢,由于我们的开发机器是不能够连接外网的, |
|||
# 所以DNS解析是不可能完成的,从而也就明白了为什么连接那么慢了。同时,请注意在增加该配置参数后,mysql的授权表中的host字段就 |
|||
# 不能够使用域名而只能够使用 ip地址了,因为这是禁止了域名解析的结果。 |
|||
|
|||
# 1.禁止域名解析 |
|||
skip-host-cache |
|||
|
|||
# 2.禁用dns解析,但是,这样不能在mysql的授权表中使用主机名了,只能使用IP。 |
|||
skip-name-resolve |
|||
|
|||
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分 |
|||
server-id = 21 |
|||
|
|||
# mysql日志 |
|||
log_bin = /var/lib/mysql/mysql-bin.log |
|||
log-bin-index=slave-relay-bin.index |
|||
|
|||
#日志记录的格式 |
|||
binlog_format=MIXED |
|||
|
|||
#单个日志文件最大 |
|||
max_binlog_size = 512M |
|||
|
|||
#从库建议开启,有利于数据一致性 |
|||
relay_log_recovery = 1 |
|||
|
|||
#如果从库还会用做主库,建议开启 |
|||
log_slave_updates = 1 |
|||
|
|||
# 中继日志:存储所有主库TP过来的binlog事件主库binlog:记录主库发生过的修改事件 |
|||
# relay-log = /var/lib/mysql/mysql-relay-bin.log |
|||
|
|||
# Disabling symbolic-links is recommended to prevent assorted security risks |
|||
symbolic-links=0 |
|||
|
|||
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION |
@ -0,0 +1,21 @@ |
|||
version: "3.7" |
|||
services: |
|||
mysql-slave: |
|||
container_name: mysql-slave |
|||
image: mysql:5.7 |
|||
environment: |
|||
TZ: Asia/Shanghai |
|||
MYSQL_ROOT_PASSWORD: epdc!elink1405 |
|||
MYSQL_LOWER_CASE_TABLE_NAMES: 1 |
|||
volumes: |
|||
- /etc/localtime:/etc/localtime |
|||
- /etc/timezone:/etc/timezone |
|||
- /mnt/epdc/mysql/data:/var/lib/mysql |
|||
- /mnt/epdc/mysql/conf/mysql.conf.cnf:/etc/mysql/conf.d/mysql.conf.cnf |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.3 |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,20 @@ |
|||
version: "3.7" |
|||
services: |
|||
web: |
|||
image: nginx |
|||
ports: |
|||
- 80:80 |
|||
- 443:443 |
|||
volumes: |
|||
- /mnt/epdc/nginx/html:/usr/share/nginx/html |
|||
- /mnt/epdc/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro |
|||
- /mnt/epdc/nginx/conf.d:/etc/nginx/conf.d:ro |
|||
- /mnt/epdc/nginx/logs:/var/log/nginx |
|||
restart: always |
|||
container_name: nginx_master |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.4 |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,27 @@ |
|||
version: "3.7" |
|||
services: |
|||
nacos3: |
|||
image: nacos/nacos-server:latest |
|||
container_name: nacos3 |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.5 |
|||
volumes: |
|||
- /mnt/epdc/nacos/logs:/home/nacos/logs |
|||
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties |
|||
environment: |
|||
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip |
|||
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty |
|||
NACOS_SERVER_IP: 172.20.0.5 #多网卡情况下,指定ip或网卡 |
|||
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port] |
|||
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点 |
|||
MYSQL_MASTER_SERVICE_PORT: 3306 |
|||
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos |
|||
MYSQL_MASTER_SERVICE_USER: nacos |
|||
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888 |
|||
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3 |
|||
MYSQL_SLAVE_SERVICE_PORT: 3306 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,17 @@ |
|||
version: '3.7' |
|||
services: |
|||
slave2: |
|||
image: redis |
|||
container_name: redis-slave-2 |
|||
command: redis-server --slaveof 172.19.0.11 6379 --requirepass epdc!redis@slave1405 --masterauth epdc!redis@master1405 --logfile /data/log/redis-slave2.log |
|||
restart: always |
|||
volumes: |
|||
- /mnt/epdc/redis/log:/data/log |
|||
- /mnt/epdc/redis/data:/data |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.11 |
|||
|
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,9 @@ |
|||
port 26379 |
|||
logfile "/usr/local/redis/sentinel/log/sentinel.log" |
|||
dir "/usr/local/redis/sentinel" |
|||
sentinel monitor epdcmaster 172.19.0.11 6379 2 |
|||
sentinel down-after-milliseconds epdcmaster 30000 |
|||
sentinel parallel-syncs epdcmaster 1 |
|||
sentinel failover-timeout epdcmaster 180000 |
|||
sentinel deny-scripts-reconfig yes |
|||
sentinel auth-pass epdcmaster epdc!redis@master1405 |
@ -0,0 +1,18 @@ |
|||
version: '3.7' |
|||
services: |
|||
sentinel2: |
|||
image: redis |
|||
container_name: redis-sentinel-2 |
|||
command: redis-sentinel /usr/local/etc/redis/sentinel.conf |
|||
volumes: |
|||
- /mnt/epdc/redis/sentinel/conf/sentinel1.conf:/usr/local/etc/redis/sentinel.conf |
|||
- /mnt/epdc/redis/sentinel/data:/data |
|||
- /mnt/epdc/redis/sentinel/log:/usr/local/redis/sentinel/log |
|||
- /mnt/epdc/redis/sentinel/dir:/usr/local/redis/sentinel |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.20.0.12 |
|||
|
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,73 @@ |
|||
registry { |
|||
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa |
|||
type = "nacos" |
|||
|
|||
nacos { |
|||
serverAddr = "47.104.208.104:80" |
|||
namespace = "" |
|||
cluster = "default" |
|||
} |
|||
eureka { |
|||
serviceUrl = "http://localhost:8761/eureka" |
|||
application = "default" |
|||
weight = "1" |
|||
} |
|||
redis { |
|||
serverAddr = "localhost:6379" |
|||
db = "0" |
|||
} |
|||
zk { |
|||
cluster = "default" |
|||
serverAddr = "127.0.0.1:2181" |
|||
session.timeout = 6000 |
|||
connect.timeout = 2000 |
|||
} |
|||
consul { |
|||
cluster = "default" |
|||
serverAddr = "127.0.0.1:8500" |
|||
} |
|||
etcd3 { |
|||
cluster = "default" |
|||
serverAddr = "http://localhost:2379" |
|||
} |
|||
sofa { |
|||
serverAddr = "127.0.0.1:9603" |
|||
application = "default" |
|||
region = "DEFAULT_ZONE" |
|||
datacenter = "DefaultDataCenter" |
|||
cluster = "default" |
|||
group = "SEATA_GROUP" |
|||
addressWaitTime = "3000" |
|||
} |
|||
file { |
|||
name = "file.conf" |
|||
} |
|||
} |
|||
|
|||
config { |
|||
# file、nacos 、apollo、zk、consul、etcd3 |
|||
type = "nacos" |
|||
|
|||
nacos { |
|||
serverAddr = "47.104.208.104:80" |
|||
namespace = "" |
|||
} |
|||
consul { |
|||
serverAddr = "127.0.0.1:8500" |
|||
} |
|||
apollo { |
|||
app.id = "seata-server" |
|||
apollo.meta = "http://192.168.1.204:8801" |
|||
} |
|||
zk { |
|||
serverAddr = "127.0.0.1:2181" |
|||
session.timeout = 6000 |
|||
connect.timeout = 2000 |
|||
} |
|||
etcd3 { |
|||
serverAddr = "http://localhost:2379" |
|||
} |
|||
file { |
|||
name = "file.conf" |
|||
} |
|||
} |
@ -0,0 +1,9 @@ |
|||
1. 创建网络: |
|||
|
|||
``` |
|||
docker network create -d bridge --subnet 172.20.0.0/24 epdc_network |
|||
``` |
|||
|
|||
2. 执行1-mysql中的docker-compose.yml |
|||
3. 执行2-nacos中的docker-compose.yml |
|||
https://github.com/alibaba/nacos/blob/master/distribution/conf/nacos-mysql.sql |
@ -0,0 +1,36 @@ |
|||
[client] |
|||
|
|||
default-character-set=utf8 |
|||
|
|||
[mysqld] |
|||
|
|||
character-set-server=utf8 |
|||
|
|||
##### 这两步操作可以解决mysql连接很慢的问题 ##### |
|||
# 根据官方文档说明,如果你的mysql主机查询DNS很慢或是有很多客户端主机时会导致连接很慢,由于我们的开发机器是不能够连接外网的, |
|||
# 所以DNS解析是不可能完成的,从而也就明白了为什么连接那么慢了。同时,请注意在增加该配置参数后,mysql的授权表中的host字段就 |
|||
# 不能够使用域名而只能够使用 ip地址了,因为这是禁止了域名解析的结果。 |
|||
|
|||
# 1.禁止域名解析 |
|||
skip-host-cache |
|||
|
|||
# 2.禁用dns解析,但是,这样不能在mysql的授权表中使用主机名了,只能使用IP。 |
|||
skip-name-resolve |
|||
|
|||
# 数据库编号, 要具有唯一性, 不能跟其他数据库重复, 方便同步区分 |
|||
server-id = 11 |
|||
|
|||
# mysql日志 |
|||
log_bin = /var/lib/mysql/master-bin.log |
|||
log-bin-index=master-bin.index |
|||
|
|||
# binlog日志格式,mysql默认采用statement,建议使用 mixed(是statement和row模式的结合) |
|||
binlog_format = mixed |
|||
|
|||
#单个日志文件最大 |
|||
max_binlog_size = 512M |
|||
|
|||
# Disabling symbolic-links is recommended to prevent assorted security risks |
|||
symbolic-links=0 |
|||
|
|||
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION |
@ -0,0 +1,22 @@ |
|||
version: "3.7" |
|||
services: |
|||
mysql-master: |
|||
container_name: mysql-master |
|||
image: mysql:5.7 |
|||
environment: |
|||
TZ: Asia/Shanghai |
|||
MYSQL_ROOT_PASSWORD: epdc!elink1405 |
|||
MYSQL_LOWER_CASE_TABLE_NAMES: 1 |
|||
volumes: |
|||
- /etc/localtime:/etc/localtime |
|||
- /etc/timezone:/etc/timezone |
|||
- /mnt/epdc/mysql/data:/var/lib/mysql |
|||
- /mnt/epdc/mysql/conf/mysql.conf.cnf:/etc/mysql/conf.d/mysql.conf.cnf |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.2 |
|||
networks: |
|||
epdc_network: |
|||
external: true |
|||
|
@ -0,0 +1,50 @@ |
|||
version: "3.7" |
|||
services: |
|||
nacos1: |
|||
image: nacos/nacos-server:latest |
|||
container_name: nacos1 |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.3 |
|||
volumes: |
|||
- /mnt/epdc/nacos/logs/nacos1:/home/nacos/logs |
|||
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties |
|||
environment: |
|||
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip |
|||
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty |
|||
NACOS_SERVER_IP: 172.19.0.3 #多网卡情况下,指定ip或网卡 |
|||
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port] |
|||
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点 |
|||
MYSQL_MASTER_SERVICE_PORT: 3306 |
|||
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos |
|||
MYSQL_MASTER_SERVICE_USER: nacos |
|||
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888 |
|||
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3 |
|||
MYSQL_SLAVE_SERVICE_PORT: 3306 |
|||
restart: on-failure |
|||
|
|||
nacos2: |
|||
image: nacos/nacos-server:latest |
|||
container_name: nacos2 |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.4 |
|||
volumes: |
|||
- /mnt/epdc/nacos/logs/nacos2:/home/nacos/logs |
|||
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties |
|||
environment: |
|||
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip |
|||
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty |
|||
NACOS_SERVER_IP: 172.19.0.4 #多网卡情况下,指定ip或网卡 |
|||
NACOS_SERVERS: 172.20.0.5:8848 172.19.0.3:8848 172.19.0.4:8848 #集群中其它节点[ip1:port ip2:port ip3:port] |
|||
MYSQL_MASTER_SERVICE_HOST: 172.19.0.2 #mysql配置,Master为主节点,Slave为从节点 |
|||
MYSQL_MASTER_SERVICE_PORT: 3306 |
|||
MYSQL_MASTER_SERVICE_DB_NAME: epdc_nacos |
|||
MYSQL_MASTER_SERVICE_USER: nacos |
|||
MYSQL_MASTER_SERVICE_PASSWORD: elink!nacos888 |
|||
MYSQL_SLAVE_SERVICE_HOST: 172.20.0.3 |
|||
MYSQL_SLAVE_SERVICE_PORT: 3306 |
|||
restart: always |
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,73 @@ |
|||
registry { |
|||
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa |
|||
type = "nacos" |
|||
|
|||
nacos { |
|||
serverAddr = "47.104.208.104:80" |
|||
namespace = "" |
|||
cluster = "default" |
|||
} |
|||
eureka { |
|||
serviceUrl = "http://localhost:8761/eureka" |
|||
application = "default" |
|||
weight = "1" |
|||
} |
|||
redis { |
|||
serverAddr = "localhost:6379" |
|||
db = "0" |
|||
} |
|||
zk { |
|||
cluster = "default" |
|||
serverAddr = "127.0.0.1:2181" |
|||
session.timeout = 6000 |
|||
connect.timeout = 2000 |
|||
} |
|||
consul { |
|||
cluster = "default" |
|||
serverAddr = "127.0.0.1:8500" |
|||
} |
|||
etcd3 { |
|||
cluster = "default" |
|||
serverAddr = "http://localhost:2379" |
|||
} |
|||
sofa { |
|||
serverAddr = "127.0.0.1:9603" |
|||
application = "default" |
|||
region = "DEFAULT_ZONE" |
|||
datacenter = "DefaultDataCenter" |
|||
cluster = "default" |
|||
group = "SEATA_GROUP" |
|||
addressWaitTime = "3000" |
|||
} |
|||
file { |
|||
name = "file.conf" |
|||
} |
|||
} |
|||
|
|||
config { |
|||
# file、nacos 、apollo、zk、consul、etcd3 |
|||
type = "nacos" |
|||
|
|||
nacos { |
|||
serverAddr = "47.104.208.104:80" |
|||
namespace = "" |
|||
} |
|||
consul { |
|||
serverAddr = "127.0.0.1:8500" |
|||
} |
|||
apollo { |
|||
app.id = "seata-server" |
|||
apollo.meta = "http://192.168.1.204:8801" |
|||
} |
|||
zk { |
|||
serverAddr = "127.0.0.1:2181" |
|||
session.timeout = 6000 |
|||
connect.timeout = 2000 |
|||
} |
|||
etcd3 { |
|||
serverAddr = "http://localhost:2379" |
|||
} |
|||
file { |
|||
name = "file.conf" |
|||
} |
|||
} |
@ -0,0 +1,81 @@ |
|||
transport.type=TCP |
|||
transport.server=NIO |
|||
transport.heartbeat=true |
|||
transport.enable-client-batch-send-request=false |
|||
transport.thread-factory.boss-thread-prefix=NettyBoss |
|||
transport.thread-factory.worker-thread-prefix=NettyServerNIOWorker |
|||
transport.thread-factory.server-executor-thread-prefix=NettyServerBizHandler |
|||
transport.thread-factory.share-boss-worker=false |
|||
transport.thread-factory.client-selector-thread-prefix=NettyClientSelector |
|||
transport.thread-factory.client-selector-thread-size=1 |
|||
transport.thread-factory.client-worker-thread-prefix=NettyClientWorkerThread |
|||
transport.thread-factory.boss-thread-size=1 |
|||
transport.thread-factory.worker-thread-size=8 |
|||
transport.shutdown.wait=3 |
|||
service.vgroup_mapping.my_test_tx_group=default |
|||
service.vgroup_mapping.epdc-api-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-demo-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-user-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-services-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-party-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-heart-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-neighbor-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-oss-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-message-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-news-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-job-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-admin-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-activiti-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-kpi-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-points-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-webservice-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-events-server-fescar-service-group=default |
|||
service.enableDegrade=false |
|||
service.disableGlobalTransaction=false |
|||
client.rm.async.commit.buffer.limit=10000 |
|||
client.rm.lock.retry.internal=10 |
|||
client.rm.lock.retry.times=30 |
|||
client.rm.report.retry.count=5 |
|||
client.rm.lock.retry.policy.branch-rollback-on-conflict=true |
|||
client.rm.table.meta.check.enable=false |
|||
client.rm.report.success.enable=true |
|||
client.tm.commit.retry.count=5 |
|||
client.tm.rollback.retry.count=5 |
|||
store.mode=db |
|||
store.file.dir=file_store/data |
|||
store.file.max-branch-session-size=16384 |
|||
store.file.max-global-session-size=512 |
|||
store.file.file-write-buffer-cache-size=16384 |
|||
store.file.flush-disk-mode=async |
|||
store.file.session.reload.read_size=100 |
|||
store.db.datasource=dbcp |
|||
store.db.db-type=mysql |
|||
store.db.driver-class-name=com.mysql.jdbc.Driver |
|||
store.db.url=jdbc:mysql://172.19.0.2:3306/seata?useUnicode=true |
|||
store.db.user=epdc |
|||
store.db.password=Elink@833066 |
|||
store.db.min-conn=1 |
|||
store.db.max-conn=3 |
|||
store.db.global.table=global_table |
|||
store.db.branch.table=branch_table |
|||
store.db.query-limit=100 |
|||
store.db.lock-table=lock_table |
|||
server.recovery.committing-retry-period=1000 |
|||
server.recovery.asyn-committing-retry-period=1000 |
|||
server.recovery.rollbacking-retry-period=1000 |
|||
server.recovery.timeout-retry-period=1000 |
|||
server.max.commit.retry.timeout=-1 |
|||
server.max.rollback.retry.timeout=-1 |
|||
server.rollback.retry.timeout.unlock.enable=false |
|||
client.undo.data.validation=true |
|||
client.undo.log.serialization=jackson |
|||
server.undo.log.save.days=7 |
|||
server.undo.log.delete.period=86400000 |
|||
client.undo.log.table=undo_log |
|||
client.log.exceptionRate=100 |
|||
transport.serialization=seata |
|||
transport.compressor=none |
|||
metrics.enabled=false |
|||
metrics.registry-type=compact |
|||
metrics.exporter-list=prometheus |
|||
metrics.exporter-prometheus-port=9898 |
@ -0,0 +1,89 @@ |
|||
#!/usr/bin/env bash |
|||
# Copyright 1999-2019 Seata.io Group. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
while getopts ":h:p:g:t:" opt |
|||
do |
|||
case $opt in |
|||
h) |
|||
host=$OPTARG |
|||
;; |
|||
p) |
|||
port=$OPTARG |
|||
;; |
|||
g) |
|||
group=$OPTARG |
|||
;; |
|||
t) |
|||
tenant=$OPTARG |
|||
;; |
|||
?) |
|||
echo "\033[31m USAGE OPTION: $0 [-h host] [-p port] [-g group] [-t tenant] \033[0m" |
|||
exit 1 |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
if [[ -z ${host} ]]; then |
|||
host=localhost |
|||
fi |
|||
if [[ -z ${port} ]]; then |
|||
port=8848 |
|||
fi |
|||
if [[ -z ${group} ]]; then |
|||
group="SEATA_GROUP" |
|||
fi |
|||
if [[ -z ${tenant} ]]; then |
|||
tenant="" |
|||
fi |
|||
|
|||
nacosAddr=$host:$port |
|||
contentType="content-type:application/json;charset=UTF-8" |
|||
|
|||
echo "set nacosAddr=$nacosAddr" |
|||
echo "set group=$group" |
|||
|
|||
failCount=0 |
|||
tempLog=$(mktemp -u) |
|||
function addConfig() { |
|||
curl -X POST -H "${1}" "http://$2/nacos/v1/cs/configs?dataId=$3&group=$group&content=$4&tenant=$tenant" >"${tempLog}" 2>/dev/null |
|||
if [[ -z $(cat "${tempLog}") ]]; then |
|||
echo "\033[31m Please check the cluster status. \033[0m" |
|||
exit 1 |
|||
fi |
|||
if [[ $(cat "${tempLog}") =~ "true" ]]; then |
|||
echo "Set $3=$4\033[32m successfully \033[0m" |
|||
else |
|||
echo "Set $3=$4\033[31m failure \033[0m" |
|||
(( failCount++ )) |
|||
fi |
|||
} |
|||
|
|||
count=0 |
|||
for line in $(cat $(dirname "$PWD")/config.txt); do |
|||
(( count++ )) |
|||
key=${line%%=*} |
|||
value=${line#*=} |
|||
addConfig "${contentType}" "${nacosAddr}" "${key}" "${value}" |
|||
done |
|||
|
|||
echo "=========================================================================" |
|||
echo " Complete initialization parameters, \033[32m total-count:$count \033[0m, \033[31m failure-count:$failCount \033[0m" |
|||
echo "=========================================================================" |
|||
|
|||
if [[ ${failCount} -eq 0 ]]; then |
|||
echo "\033[32m Init nacos config finished, please start seata-server. \033[0m" |
|||
else |
|||
echo "\033[31m init nacos config fail. \033[0m" |
|||
fi |
@ -0,0 +1,29 @@ |
|||
version: '3.7' |
|||
services: |
|||
master: |
|||
image: redis |
|||
container_name: redis-master |
|||
command: redis-server --requirepass epdc!redis@master1405 --logfile /data/log/redis-master.log |
|||
restart: always |
|||
volumes: |
|||
- /mnt/epdc/redis/log:/data/log |
|||
- /mnt/epdc/redis/data:/data |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.11 |
|||
|
|||
slave1: |
|||
image: redis |
|||
container_name: redis-slave-1 |
|||
command: redis-server --slaveof 172.19.0.11 6379 --requirepass epdc!redis@slave1405 --masterauth epdc!redis@master1405 --logfile /data/log/redis-slave1.log |
|||
restart: always |
|||
volumes: |
|||
- /mnt/epdc/redis/log:/data/log |
|||
- /mnt/epdc/redis/data:/data |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.12 |
|||
|
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,9 @@ |
|||
port 26379 |
|||
logfile "/usr/local/redis/sentinel/log/sentinel.log" |
|||
dir "/usr/local/redis/sentinel" |
|||
sentinel monitor epdcmaster 172.19.0.11 6379 2 |
|||
sentinel down-after-milliseconds epdcmaster 30000 |
|||
sentinel parallel-syncs epdcmaster 1 |
|||
sentinel failover-timeout epdcmaster 180000 |
|||
sentinel deny-scripts-reconfig yes |
|||
sentinel auth-pass epdcmaster epdc!redis@master1405 |
@ -0,0 +1,9 @@ |
|||
port 26379 |
|||
logfile "/usr/local/redis/sentinel/log/sentinel.log" |
|||
dir "/usr/local/redis/sentinel" |
|||
sentinel monitor epdcmaster 172.19.0.11 6379 2 |
|||
sentinel down-after-milliseconds epdcmaster 30000 |
|||
sentinel parallel-syncs epdcmaster 1 |
|||
sentinel failover-timeout epdcmaster 180000 |
|||
sentinel deny-scripts-reconfig yes |
|||
sentinel auth-pass epdcmaster epdc!redis@master1405 |
@ -0,0 +1,31 @@ |
|||
version: '3.7' |
|||
services: |
|||
sentinel1: |
|||
image: redis |
|||
container_name: redis-sentinel-1 |
|||
command: redis-sentinel /usr/local/etc/redis/sentinel.conf |
|||
volumes: |
|||
- /mnt/epdc/redis/sentinel/conf/sentinel1.conf:/usr/local/etc/redis/sentinel.conf |
|||
- /mnt/epdc/redis/sentinel/data:/data |
|||
- /mnt/epdc/redis/sentinel/log:/usr/local/redis/sentinel/log |
|||
- /mnt/epdc/redis/sentinel/dir:/usr/local/redis/sentinel |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.13 |
|||
|
|||
sentinel3: |
|||
image: redis |
|||
container_name: redis-sentinel-3 |
|||
command: redis-sentinel /usr/local/etc/redis/sentinel.conf |
|||
volumes: |
|||
- /mnt/epdc/redis/sentinel/conf/sentinel2.conf:/usr/local/etc/redis/sentinel.conf |
|||
- /mnt/epdc/redis/sentinel/data2:/data |
|||
- /mnt/epdc/redis/sentinel/log2:/usr/local/redis/sentinel/log |
|||
- /mnt/epdc/redis/sentinel/dir2:/usr/local/redis/sentinel |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.14 |
|||
|
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,8 @@ |
|||
1. 创建网络: |
|||
|
|||
``` |
|||
docker network create -d bridge --subnet 172.19.0.0/24 epdc_network |
|||
``` |
|||
|
|||
2. 执行1-mysql中的docker-compose.yml |
|||
3. 执行2-nacos中的docker-compose.yml |
@ -0,0 +1,39 @@ |
|||
# centos 7 |
|||
FROM centos:7 |
|||
# 添加配置文件 |
|||
ADD conf/client.conf /etc/fdfs/ |
|||
ADD conf/http.conf /etc/fdfs/ |
|||
ADD conf/mime.types /etc/fdfs/ |
|||
ADD conf/storage.conf /etc/fdfs/ |
|||
ADD conf/tracker.conf /etc/fdfs/ |
|||
ADD fastdfs.sh /home |
|||
ADD conf/nginx.conf /etc/fdfs/ |
|||
ADD conf/mod_fastdfs.conf /etc/fdfs |
|||
|
|||
# run |
|||
RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl-devel wget vim -y \ |
|||
&& cd /usr/local/src \ |
|||
&& git clone https://github.com/happyfish100/libfastcommon.git --depth 1 \ |
|||
&& git clone https://github.com/happyfish100/fastdfs.git --depth 1 \ |
|||
&& git clone https://github.com/happyfish100/fastdfs-nginx-module.git --depth 1 \ |
|||
&& wget http://nginx.org/download/nginx-1.15.4.tar.gz \ |
|||
&& tar -zxvf nginx-1.15.4.tar.gz \ |
|||
&& mkdir /home/dfs \ |
|||
&& cd /usr/local/src/ \ |
|||
&& cd libfastcommon/ \ |
|||
&& ./make.sh && ./make.sh install \ |
|||
&& cd ../ \ |
|||
&& cd fastdfs/ \ |
|||
&& ./make.sh && ./make.sh install \ |
|||
&& cd ../ \ |
|||
&& cd nginx-1.15.4/ \ |
|||
&& ./configure --add-module=/usr/local/src/fastdfs-nginx-module/src/ \ |
|||
&& make && make install \ |
|||
&& chmod +x /home/fastdfs.sh |
|||
|
|||
|
|||
# export config |
|||
VOLUME /etc/fdfs |
|||
|
|||
EXPOSE 22122 23000 8888 80 |
|||
ENTRYPOINT ["/home/fastdfs.sh"] |
@ -0,0 +1,45 @@ |
|||
# FastDFS Dockerfile network (网络版本) |
|||
|
|||
## 声明 |
|||
其实并没什么区别 教程是在上一位huayanYu(小锅盖)和 Wiki的作者 的基础上进行了一些修改,本质上还是huayanYu(小锅盖) 和 Wiki 上的作者写的教程 |
|||
|
|||
|
|||
## 目录介绍 |
|||
### conf |
|||
Dockerfile 所需要的一些配置文件 |
|||
当然你也可以对这些文件进行一些修改 比如 storage.conf 里面的 base_path 等相关 |
|||
|
|||
## 使用方法 |
|||
需要注意的是 你需要在运行容器的时候制定宿主机的ip 用参数 FASTDFS_IPADDR 来指定 |
|||
|
|||
|
|||
|
|||
``` |
|||
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast 镜像id/镜像名称 |
|||
``` |
|||
|
|||
|
|||
## 后记 |
|||
本质上 local 版本与 network 版本无区别 |
|||
|
|||
|
|||
|
|||
|
|||
## Statement |
|||
In fact, there is no difference between the tutorials written by Huayan Yu and Wiki on the basis of their previous authors. In essence, they are also tutorials written by the authors of Huayan Yu and Wiki. |
|||
|
|||
## Catalogue introduction |
|||
### conf |
|||
Dockerfile Some configuration files needed |
|||
Of course, you can also make some modifications to these files, such as base_path in storage.conf, etc. |
|||
|
|||
## Usage method |
|||
Note that you need to specify the host IP when running the container with the parameter FASTDFS_IPADDR |
|||
Here's a sample docker run instruction |
|||
``` |
|||
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast 镜像id/镜像名称 |
|||
``` |
|||
|
|||
## Epilogue |
|||
Essentially, there is no difference between the local version and the network version. |
|||
|
@ -0,0 +1,63 @@ |
|||
# connect timeout in seconds |
|||
# default value is 30s |
|||
connect_timeout=30 |
|||
|
|||
# network timeout in seconds |
|||
# default value is 30s |
|||
network_timeout=60 |
|||
|
|||
# the base path to store log files |
|||
base_path=/home/dfs |
|||
|
|||
# tracker_server can occur more than once, and tracker_server format is |
|||
# "host:port", host can be hostname or ip address |
|||
tracker_server=172.19.0.30:22122 |
|||
tracker_server=172.20.0.30:22122 |
|||
|
|||
#standard log level as syslog, case insensitive, value list: |
|||
### emerg for emergency |
|||
### alert |
|||
### crit for critical |
|||
### error |
|||
### warn for warning |
|||
### notice |
|||
### info |
|||
### debug |
|||
log_level=info |
|||
|
|||
# if use connection pool |
|||
# default value is false |
|||
# since V4.05 |
|||
use_connection_pool = false |
|||
|
|||
# connections whose the idle time exceeds this time will be closed |
|||
# unit: second |
|||
# default value is 3600 |
|||
# since V4.05 |
|||
connection_pool_max_idle_time = 3600 |
|||
|
|||
# if load FastDFS parameters from tracker server |
|||
# since V4.05 |
|||
# default value is false |
|||
load_fdfs_parameters_from_tracker=false |
|||
|
|||
# if use storage ID instead of IP address |
|||
# same as tracker.conf |
|||
# valid only when load_fdfs_parameters_from_tracker is false |
|||
# default value is false |
|||
# since V4.05 |
|||
use_storage_id = false |
|||
|
|||
# specify storage ids filename, can use relative or absolute path |
|||
# same as tracker.conf |
|||
# valid only when load_fdfs_parameters_from_tracker is false |
|||
# since V4.05 |
|||
storage_ids_filename = storage_ids.conf |
|||
|
|||
|
|||
#HTTP settings |
|||
http.tracker_server_port=80 |
|||
|
|||
#use "#include" directive to include other HTTP settings |
|||
##include http.conf |
|||
|
@ -0,0 +1,29 @@ |
|||
# HTTP default content type |
|||
http.default_content_type = application/octet-stream |
|||
|
|||
# MIME types mapping filename |
|||
# MIME types file format: MIME_type extensions |
|||
# such as: image/jpeg jpeg jpg jpe |
|||
# you can use apache's MIME file: mime.types |
|||
http.mime_types_filename=mime.types |
|||
|
|||
# if use token to anti-steal |
|||
# default value is false (0) |
|||
http.anti_steal.check_token=false |
|||
|
|||
# token TTL (time to live), seconds |
|||
# default value is 600 |
|||
http.anti_steal.token_ttl=900 |
|||
|
|||
# secret key to generate anti-steal token |
|||
# this parameter must be set when http.anti_steal.check_token set to true |
|||
# the length of the secret key should not exceed 128 bytes |
|||
http.anti_steal.secret_key=FastDFS1234567890 |
|||
|
|||
# return the content of the file when check token fail |
|||
# default value is empty (no file specified) |
|||
http.anti_steal.token_check_fail=/home/yuqing/fastdfs/conf/anti-steal.jpg |
|||
|
|||
# if support multi regions for HTTP Range |
|||
# default value is true |
|||
http.multi_range.enabed = true |
File diff suppressed because it is too large
@ -0,0 +1,134 @@ |
|||
# connect timeout in seconds |
|||
# default value is 30s |
|||
connect_timeout=2 |
|||
|
|||
# network recv and send timeout in seconds |
|||
# default value is 30s |
|||
network_timeout=30 |
|||
|
|||
# the base path to store log files |
|||
base_path=/tmp |
|||
|
|||
# if load FastDFS parameters from tracker server |
|||
# since V1.12 |
|||
# default value is false |
|||
load_fdfs_parameters_from_tracker=true |
|||
|
|||
# storage sync file max delay seconds |
|||
# same as tracker.conf |
|||
# valid only when load_fdfs_parameters_from_tracker is false |
|||
# since V1.12 |
|||
# default value is 86400 seconds (one day) |
|||
storage_sync_file_max_delay = 86400 |
|||
|
|||
# if use storage ID instead of IP address |
|||
# same as tracker.conf |
|||
# valid only when load_fdfs_parameters_from_tracker is false |
|||
# default value is false |
|||
# since V1.13 |
|||
use_storage_id = false |
|||
|
|||
# specify storage ids filename, can use relative or absolute path |
|||
# same as tracker.conf |
|||
# valid only when load_fdfs_parameters_from_tracker is false |
|||
# since V1.13 |
|||
storage_ids_filename = storage_ids.conf |
|||
|
|||
# FastDFS tracker_server can occur more than once, and tracker_server format is |
|||
# "host:port", host can be hostname or ip address |
|||
# valid only when load_fdfs_parameters_from_tracker is true |
|||
tracker_server=172.19.0.30:22122 |
|||
tracker_server=172.20.0.30:22122 |
|||
|
|||
# the port of the local storage server |
|||
# the default value is 23000 |
|||
storage_server_port=23000 |
|||
|
|||
# the group name of the local storage server |
|||
group_name=group1 |
|||
|
|||
# if the url / uri including the group name |
|||
# set to false when uri like /M00/00/00/xxx |
|||
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx |
|||
# default value is false |
|||
url_have_group_name = true |
|||
|
|||
# path(disk or mount point) count, default value is 1 |
|||
# must same as storage.conf |
|||
store_path_count=1 |
|||
|
|||
# store_path#, based 0, if store_path0 not exists, it's value is base_path |
|||
# the paths must be exist |
|||
# must same as storage.conf |
|||
store_path0=/home/dfs |
|||
#store_path1=/home/yuqing/fastdfs1 |
|||
|
|||
# standard log level as syslog, case insensitive, value list: |
|||
### emerg for emergency |
|||
### alert |
|||
### crit for critical |
|||
### error |
|||
### warn for warning |
|||
### notice |
|||
### info |
|||
### debug |
|||
log_level=info |
|||
|
|||
# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log |
|||
# empty for output to stderr (apache and nginx error_log file) |
|||
log_filename= |
|||
|
|||
# response mode when the file not exist in the local file system |
|||
## proxy: get the content from other storage server, then send to client |
|||
## redirect: redirect to the original storage server (HTTP Header is Location) |
|||
response_mode=proxy |
|||
|
|||
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a |
|||
# multi aliases split by comma. empty value means auto set by OS type |
|||
# this parameter is used to get all ip addresses of the local host |
|||
# default values is empty |
|||
if_alias_prefix= |
|||
|
|||
# use "#include" directive to include HTTP config file |
|||
# NOTE: #include is an include directive, do NOT remove the # before include |
|||
#include http.conf |
|||
|
|||
|
|||
# if support flv |
|||
# default value is false |
|||
# since v1.15 |
|||
flv_support = true |
|||
|
|||
# flv file extension name |
|||
# default value is flv |
|||
# since v1.15 |
|||
flv_extension = flv |
|||
|
|||
|
|||
# set the group count |
|||
# set to none zero to support multi-group on this storage server |
|||
# set to 0 for single group only |
|||
# groups settings section as [group1], [group2], ..., [groupN] |
|||
# default value is 0 |
|||
# since v1.14 |
|||
group_count = 0 |
|||
|
|||
# group settings for group #1 |
|||
# since v1.14 |
|||
# when support multi-group on this storage server, uncomment following section |
|||
#[group1] |
|||
#group_name=group1 |
|||
#storage_server_port=23000 |
|||
#store_path_count=2 |
|||
#store_path0=/home/yuqing/fastdfs |
|||
#store_path1=/home/yuqing/fastdfs1 |
|||
|
|||
# group settings for group #2 |
|||
# since v1.14 |
|||
# when support multi-group, uncomment following section as necessary |
|||
#[group2] |
|||
#group_name=group2 |
|||
#storage_server_port=23000 |
|||
#store_path_count=1 |
|||
#store_path0=/home/yuqing/fastdfs |
|||
|
@ -0,0 +1,127 @@ |
|||
|
|||
#user nobody; |
|||
worker_processes 1; |
|||
|
|||
#error_log logs/error.log; |
|||
#error_log logs/error.log notice; |
|||
#error_log logs/error.log info; |
|||
|
|||
#pid logs/nginx.pid; |
|||
|
|||
|
|||
events { |
|||
worker_connections 1024; |
|||
} |
|||
|
|||
|
|||
http { |
|||
include mime.types; |
|||
default_type application/octet-stream; |
|||
|
|||
#log_format main '$remote_addr - $remote_user [$time_local] "$request" ' |
|||
# '$status $body_bytes_sent "$http_referer" ' |
|||
# '"$http_user_agent" "$http_x_forwarded_for"'; |
|||
|
|||
#access_log logs/access.log main; |
|||
|
|||
sendfile on; |
|||
#tcp_nopush on; |
|||
|
|||
#keepalive_timeout 0; |
|||
keepalive_timeout 65; |
|||
|
|||
#gzip on; |
|||
|
|||
server { |
|||
listen 80; |
|||
server_name localhost; |
|||
|
|||
#charset koi8-r; |
|||
|
|||
#access_log logs/host.access.log main; |
|||
|
|||
location / { |
|||
root html; |
|||
index index.html index.htm; |
|||
} |
|||
|
|||
#error_page 404 /404.html; |
|||
|
|||
# redirect server error pages to the static page /50x.html |
|||
# |
|||
error_page 500 502 503 504 /50x.html; |
|||
location = /50x.html { |
|||
root html; |
|||
} |
|||
|
|||
# proxy the PHP scripts to Apache listening on 127.0.0.1:80 |
|||
# |
|||
#location ~ \.php$ { |
|||
# proxy_pass http://127.0.0.1; |
|||
#} |
|||
|
|||
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 |
|||
# |
|||
#location ~ \.php$ { |
|||
# root html; |
|||
# fastcgi_pass 127.0.0.1:9000; |
|||
# fastcgi_index index.php; |
|||
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; |
|||
# include fastcgi_params; |
|||
#} |
|||
|
|||
# deny access to .htaccess files, if Apache's document root |
|||
# concurs with nginx's one |
|||
# |
|||
#location ~ /\.ht { |
|||
# deny all; |
|||
#} |
|||
} |
|||
server { |
|||
listen 8888; |
|||
server_name localhost; |
|||
location ~/group[0-9]/ { |
|||
ngx_fastdfs_module; |
|||
} |
|||
error_page 500 502 503 504 /50x.html; |
|||
location = /50x.html { |
|||
root html; |
|||
} |
|||
} |
|||
|
|||
# another virtual host using mix of IP-, name-, and port-based configuration |
|||
# |
|||
#server { |
|||
# listen 8000; |
|||
# listen somename:8080; |
|||
# server_name somename alias another.alias; |
|||
|
|||
# location / { |
|||
# root html; |
|||
# index index.html index.htm; |
|||
# } |
|||
#} |
|||
|
|||
|
|||
# HTTPS server |
|||
# |
|||
#server { |
|||
# listen 443 ssl; |
|||
# server_name localhost; |
|||
|
|||
# ssl_certificate cert.pem; |
|||
# ssl_certificate_key cert.key; |
|||
|
|||
# ssl_session_cache shared:SSL:1m; |
|||
# ssl_session_timeout 5m; |
|||
|
|||
# ssl_ciphers HIGH:!aNULL:!MD5; |
|||
# ssl_prefer_server_ciphers on; |
|||
|
|||
# location / { |
|||
# root html; |
|||
# index index.html index.htm; |
|||
# } |
|||
#} |
|||
|
|||
} |
@ -0,0 +1,287 @@ |
|||
# is this config file disabled |
|||
# false for enabled |
|||
# true for disabled |
|||
disabled=false |
|||
|
|||
# the name of the group this storage server belongs to |
|||
# |
|||
# comment or remove this item for fetching from tracker server, |
|||
# in this case, use_storage_id must set to true in tracker.conf, |
|||
# and storage_ids.conf must be configured correctly. |
|||
group_name=epdcFile |
|||
|
|||
# bind an address of this host |
|||
# empty for bind all addresses of this host |
|||
bind_addr= |
|||
|
|||
# if bind an address of this host when connect to other servers |
|||
# (this storage server as a client) |
|||
# true for binding the address configed by above parameter: "bind_addr" |
|||
# false for binding any address of this host |
|||
client_bind=true |
|||
|
|||
# the storage server port |
|||
port=23000 |
|||
|
|||
# connect timeout in seconds |
|||
# default value is 30s |
|||
connect_timeout=10 |
|||
|
|||
# network timeout in seconds |
|||
# default value is 30s |
|||
network_timeout=60 |
|||
|
|||
# heart beat interval in seconds |
|||
heart_beat_interval=30 |
|||
|
|||
# disk usage report interval in seconds |
|||
stat_report_interval=60 |
|||
|
|||
# the base path to store data and log files |
|||
base_path=/home/dfs |
|||
|
|||
# max concurrent connections the server supported |
|||
# default value is 256 |
|||
# more max_connections means more memory will be used |
|||
# you should set this parameter larger, eg. 10240 |
|||
max_connections=1024 |
|||
|
|||
# the buff size to recv / send data |
|||
# this parameter must more than 8KB |
|||
# default value is 64KB |
|||
# since V2.00 |
|||
buff_size = 256KB |
|||
|
|||
# accept thread count |
|||
# default value is 1 |
|||
# since V4.07 |
|||
accept_threads=1 |
|||
|
|||
# work thread count, should <= max_connections |
|||
# work thread deal network io |
|||
# default value is 4 |
|||
# since V2.00 |
|||
work_threads=4 |
|||
|
|||
# if disk read / write separated |
|||
## false for mixed read and write |
|||
## true for separated read and write |
|||
# default value is true |
|||
# since V2.00 |
|||
disk_rw_separated = true |
|||
|
|||
# disk reader thread count per store base path |
|||
# for mixed read / write, this parameter can be 0 |
|||
# default value is 1 |
|||
# since V2.00 |
|||
disk_reader_threads = 1 |
|||
|
|||
# disk writer thread count per store base path |
|||
# for mixed read / write, this parameter can be 0 |
|||
# default value is 1 |
|||
# since V2.00 |
|||
disk_writer_threads = 1 |
|||
|
|||
# when no entry to sync, try read binlog again after X milliseconds |
|||
# must > 0, default value is 200ms |
|||
sync_wait_msec=50 |
|||
|
|||
# after sync a file, usleep milliseconds |
|||
# 0 for sync successively (never call usleep) |
|||
sync_interval=0 |
|||
|
|||
# storage sync start time of a day, time format: Hour:Minute |
|||
# Hour from 0 to 23, Minute from 0 to 59 |
|||
sync_start_time=00:00 |
|||
|
|||
# storage sync end time of a day, time format: Hour:Minute |
|||
# Hour from 0 to 23, Minute from 0 to 59 |
|||
sync_end_time=23:59 |
|||
|
|||
# write to the mark file after sync N files |
|||
# default value is 500 |
|||
write_mark_file_freq=500 |
|||
|
|||
# path(disk or mount point) count, default value is 1 |
|||
store_path_count=1 |
|||
|
|||
# store_path#, based 0, if store_path0 not exists, it's value is base_path |
|||
# the paths must be exist |
|||
store_path0=/home/dfs |
|||
#store_path1=/home/dfs2 |
|||
|
|||
# subdir_count * subdir_count directories will be auto created under each |
|||
# store_path (disk), value can be 1 to 256, default value is 256 |
|||
subdir_count_per_path=256 |
|||
|
|||
# tracker_server can occur more than once, and tracker_server format is |
|||
# "host:port", host can be hostname or ip address |
|||
tracker_server=172.19.0.30:22122 |
|||
tracker_server=172.20.0.30:22122 |
|||
|
|||
#standard log level as syslog, case insensitive, value list: |
|||
### emerg for emergency |
|||
### alert |
|||
### crit for critical |
|||
### error |
|||
### warn for warning |
|||
### notice |
|||
### info |
|||
### debug |
|||
log_level=info |
|||
|
|||
#unix group name to run this program, |
|||
#not set (empty) means run by the group of current user |
|||
run_by_group= |
|||
|
|||
#unix username to run this program, |
|||
#not set (empty) means run by current user |
|||
run_by_user= |
|||
|
|||
# allow_hosts can occur more than once, host can be hostname or ip address, |
|||
# "*" (only one asterisk) means match all ip addresses |
|||
# we can use CIDR ips like 192.168.5.64/26 |
|||
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com |
|||
# for example: |
|||
# allow_hosts=10.0.1.[1-15,20] |
|||
# allow_hosts=host[01-08,20-25].domain.com |
|||
# allow_hosts=192.168.5.64/26 |
|||
allow_hosts=* |
|||
|
|||
# the mode of the files distributed to the data path |
|||
# 0: round robin(default) |
|||
# 1: random, distributed by hash code |
|||
file_distribute_path_mode=0 |
|||
|
|||
# valid when file_distribute_to_path is set to 0 (round robin), |
|||
# when the written file count reaches this number, then rotate to next path |
|||
# default value is 100 |
|||
file_distribute_rotate_count=100 |
|||
|
|||
# call fsync to disk when write big file |
|||
# 0: never call fsync |
|||
# other: call fsync when written bytes >= this bytes |
|||
# default value is 0 (never call fsync) |
|||
fsync_after_written_bytes=0 |
|||
|
|||
# sync log buff to disk every interval seconds |
|||
# must > 0, default value is 10 seconds |
|||
sync_log_buff_interval=10 |
|||
|
|||
# sync binlog buff / cache to disk every interval seconds |
|||
# default value is 60 seconds |
|||
sync_binlog_buff_interval=10 |
|||
|
|||
# sync storage stat info to disk every interval seconds |
|||
# default value is 300 seconds |
|||
sync_stat_file_interval=300 |
|||
|
|||
# thread stack size, should >= 512KB |
|||
# default value is 512KB |
|||
thread_stack_size=512KB |
|||
|
|||
# the priority as a source server for uploading file. |
|||
# the lower this value, the higher its uploading priority. |
|||
# default value is 10 |
|||
upload_priority=10 |
|||
|
|||
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a |
|||
# multi aliases split by comma. empty value means auto set by OS type |
|||
# default values is empty |
|||
if_alias_prefix= |
|||
|
|||
# if check file duplicate, when set to true, use FastDHT to store file indexes |
|||
# 1 or yes: need check |
|||
# 0 or no: do not check |
|||
# default value is 0 |
|||
check_file_duplicate=0 |
|||
|
|||
# file signature method for check file duplicate |
|||
## hash: four 32 bits hash code |
|||
## md5: MD5 signature |
|||
# default value is hash |
|||
# since V4.01 |
|||
file_signature_method=hash |
|||
|
|||
# namespace for storing file indexes (key-value pairs) |
|||
# this item must be set when check_file_duplicate is true / on |
|||
key_namespace=FastDFS |
|||
|
|||
# set keep_alive to 1 to enable persistent connection with FastDHT servers |
|||
# default value is 0 (short connection) |
|||
keep_alive=0 |
|||
|
|||
# you can use "#include filename" (not include double quotes) directive to |
|||
# load FastDHT server list, when the filename is a relative path such as |
|||
# pure filename, the base path is the base path of current/this config file. |
|||
# must set FastDHT server list when check_file_duplicate is true / on |
|||
# please see INSTALL of FastDHT for detail |
|||
##include /home/yuqing/fastdht/conf/fdht_servers.conf |
|||
|
|||
# if log to access log |
|||
# default value is false |
|||
# since V4.00 |
|||
use_access_log = false |
|||
|
|||
# if rotate the access log every day |
|||
# default value is false |
|||
# since V4.00 |
|||
rotate_access_log = false |
|||
|
|||
# rotate access log time base, time format: Hour:Minute |
|||
# Hour from 0 to 23, Minute from 0 to 59 |
|||
# default value is 00:00 |
|||
# since V4.00 |
|||
access_log_rotate_time=00:00 |
|||
|
|||
# if rotate the error log every day |
|||
# default value is false |
|||
# since V4.02 |
|||
rotate_error_log = false |
|||
|
|||
# rotate error log time base, time format: Hour:Minute |
|||
# Hour from 0 to 23, Minute from 0 to 59 |
|||
# default value is 00:00 |
|||
# since V4.02 |
|||
error_log_rotate_time=00:00 |
|||
|
|||
# rotate access log when the log file exceeds this size |
|||
# 0 means never rotates log file by log file size |
|||
# default value is 0 |
|||
# since V4.02 |
|||
rotate_access_log_size = 0 |
|||
|
|||
# rotate error log when the log file exceeds this size |
|||
# 0 means never rotates log file by log file size |
|||
# default value is 0 |
|||
# since V4.02 |
|||
rotate_error_log_size = 0 |
|||
|
|||
# keep days of the log files |
|||
# 0 means do not delete old log files |
|||
# default value is 0 |
|||
log_file_keep_days = 0 |
|||
|
|||
# if skip the invalid record when sync file |
|||
# default value is false |
|||
# since V4.02 |
|||
file_sync_skip_invalid_record=false |
|||
|
|||
# if use connection pool |
|||
# default value is false |
|||
# since V4.05 |
|||
use_connection_pool = false |
|||
|
|||
# connections whose the idle time exceeds this time will be closed |
|||
# unit: second |
|||
# default value is 3600 |
|||
# since V4.05 |
|||
connection_pool_max_idle_time = 3600 |
|||
|
|||
# use the ip address of this storage server if domain_name is empty, |
|||
# else this domain name will ocur in the url redirected by the tracker server |
|||
http.domain_name= |
|||
|
|||
# the port of the web server on this storage server |
|||
http.server_port=8888 |
|||
|
@ -0,0 +1,278 @@ |
|||
# is this config file disabled |
|||
# false for enabled |
|||
# true for disabled |
|||
disabled=false |
|||
|
|||
# bind an address of this host |
|||
# empty for bind all addresses of this host |
|||
bind_addr= |
|||
|
|||
# the tracker server port |
|||
port=22122 |
|||
|
|||
# connect timeout in seconds |
|||
# default value is 30s |
|||
connect_timeout=10 |
|||
|
|||
# network timeout in seconds |
|||
# default value is 30s |
|||
network_timeout=60 |
|||
|
|||
# the base path to store data and log files |
|||
base_path=/home/dfs |
|||
|
|||
# max concurrent connections this server supported |
|||
# you should set this parameter larger, eg. 102400 |
|||
max_connections=1024 |
|||
|
|||
# accept thread count |
|||
# default value is 1 |
|||
# since V4.07 |
|||
accept_threads=1 |
|||
|
|||
# work thread count, should <= max_connections |
|||
# default value is 4 |
|||
# since V2.00 |
|||
work_threads=4 |
|||
|
|||
# min buff size |
|||
# default value 8KB |
|||
min_buff_size = 8KB |
|||
|
|||
# max buff size |
|||
# default value 128KB |
|||
max_buff_size = 128KB |
|||
|
|||
# the method of selecting group to upload files |
|||
# 0: round robin |
|||
# 1: specify group |
|||
# 2: load balance, select the max free space group to upload file |
|||
store_lookup=2 |
|||
|
|||
# which group to upload file |
|||
# when store_lookup set to 1, must set store_group to the group name |
|||
store_group=group2 |
|||
|
|||
# which storage server to upload file |
|||
# 0: round robin (default) |
|||
# 1: the first server order by ip address |
|||
# 2: the first server order by priority (the minimal) |
|||
# Note: if use_trunk_file set to true, must set store_server to 1 or 2 |
|||
store_server=0 |
|||
|
|||
# which path(means disk or mount point) of the storage server to upload file |
|||
# 0: round robin |
|||
# 2: load balance, select the max free space path to upload file |
|||
store_path=0 |
|||
|
|||
# which storage server to download file |
|||
# 0: round robin (default) |
|||
# 1: the source storage server which the current file uploaded to |
|||
download_server=0 |
|||
|
|||
# reserved storage space for system or other applications. |
|||
# if the free(available) space of any stoarge server in |
|||
# a group <= reserved_storage_space, |
|||
# no file can be uploaded to this group. |
|||
# bytes unit can be one of follows: |
|||
### G or g for gigabyte(GB) |
|||
### M or m for megabyte(MB) |
|||
### K or k for kilobyte(KB) |
|||
### no unit for byte(B) |
|||
### XX.XX% as ratio such as reserved_storage_space = 10% |
|||
reserved_storage_space = 1% |
|||
|
|||
#standard log level as syslog, case insensitive, value list: |
|||
### emerg for emergency |
|||
### alert |
|||
### crit for critical |
|||
### error |
|||
### warn for warning |
|||
### notice |
|||
### info |
|||
### debug |
|||
log_level=info |
|||
|
|||
#unix group name to run this program, |
|||
#not set (empty) means run by the group of current user |
|||
run_by_group= |
|||
|
|||
#unix username to run this program, |
|||
#not set (empty) means run by current user |
|||
run_by_user= |
|||
|
|||
# allow_hosts can occur more than once, host can be hostname or ip address, |
|||
# "*" (only one asterisk) means match all ip addresses |
|||
# we can use CIDR ips like 192.168.5.64/26 |
|||
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com |
|||
# for example: |
|||
# allow_hosts=10.0.1.[1-15,20] |
|||
# allow_hosts=host[01-08,20-25].domain.com |
|||
# allow_hosts=192.168.5.64/26 |
|||
allow_hosts=* |
|||
|
|||
# sync log buff to disk every interval seconds |
|||
# default value is 10 seconds |
|||
sync_log_buff_interval = 10 |
|||
|
|||
# check storage server alive interval seconds |
|||
check_active_interval = 120 |
|||
|
|||
# thread stack size, should >= 64KB |
|||
# default value is 64KB |
|||
thread_stack_size = 64KB |
|||
|
|||
# auto adjust when the ip address of the storage server changed |
|||
# default value is true |
|||
storage_ip_changed_auto_adjust = true |
|||
|
|||
# storage sync file max delay seconds |
|||
# default value is 86400 seconds (one day) |
|||
# since V2.00 |
|||
storage_sync_file_max_delay = 86400 |
|||
|
|||
# the max time of storage sync a file |
|||
# default value is 300 seconds |
|||
# since V2.00 |
|||
storage_sync_file_max_time = 300 |
|||
|
|||
# if use a trunk file to store several small files |
|||
# default value is false |
|||
# since V3.00 |
|||
use_trunk_file = false |
|||
|
|||
# the min slot size, should <= 4KB |
|||
# default value is 256 bytes |
|||
# since V3.00 |
|||
slot_min_size = 256 |
|||
|
|||
# the max slot size, should > slot_min_size |
|||
# store the upload file to trunk file when it's size <= this value |
|||
# default value is 16MB |
|||
# since V3.00 |
|||
slot_max_size = 16MB |
|||
|
|||
# the trunk file size, should >= 4MB |
|||
# default value is 64MB |
|||
# since V3.00 |
|||
trunk_file_size = 64MB |
|||
|
|||
# whether to create trunk files in advance |
|||
# default value is false |
|||
# since V3.06 |
|||
trunk_create_file_advance = false |
|||
|
|||
# the time base to create trunk file |
|||
# the time format: HH:MM |
|||
# default value is 02:00 |
|||
# since V3.06 |
|||
trunk_create_file_time_base = 02:00 |
|||
|
|||
# the interval of create trunk file, unit: second |
|||
# default value is 38400 (one day) |
|||
# since V3.06 |
|||
trunk_create_file_interval = 86400 |
|||
|
|||
# the threshold to create trunk file |
|||
# when the free trunk file size less than the threshold, will create |
|||
# the trunk files |
|||
# default value is 0 |
|||
# since V3.06 |
|||
trunk_create_file_space_threshold = 20G |
|||
|
|||
# if check trunk space occupying when loading trunk free spaces |
|||
# the occupied spaces will be ignored |
|||
# default value is false |
|||
# since V3.09 |
|||
# NOTICE: set this parameter to true will slow the loading of trunk spaces |
|||
# when startup. you should set this parameter to true when necessary. |
|||
trunk_init_check_occupying = false |
|||
|
|||
# if ignore storage_trunk.dat, reload from trunk binlog |
|||
# default value is false |
|||
# since V3.10 |
|||
# set to true once for version upgrade when your version less than V3.10 |
|||
trunk_init_reload_from_binlog = false |
|||
|
|||
# the min interval for compressing the trunk binlog file |
|||
# unit: second |
|||
# default value is 0, 0 means never compress |
|||
# FastDFS compress the trunk binlog when trunk init and trunk destroy |
|||
# recommended to set this parameter to 86400 (one day) |
|||
# since V5.01 |
|||
trunk_compress_binlog_min_interval = 0 |
|||
|
|||
# if use storage ID instead of IP address |
|||
# default value is false |
|||
# since V4.00 |
|||
use_storage_id = false |
|||
|
|||
# specify storage ids filename, can use relative or absolute path |
|||
# since V4.00 |
|||
storage_ids_filename = storage_ids.conf |
|||
|
|||
# id type of the storage server in the filename, values are: |
|||
## ip: the ip address of the storage server |
|||
## id: the server id of the storage server |
|||
# this parameter is valid only when use_storage_id set to true |
|||
# default value is ip |
|||
# since V4.03 |
|||
id_type_in_filename = ip |
|||
|
|||
# if store slave file use symbol link |
|||
# default value is false |
|||
# since V4.01 |
|||
store_slave_file_use_link = false |
|||
|
|||
# if rotate the error log every day |
|||
# default value is false |
|||
# since V4.02 |
|||
rotate_error_log = false |
|||
|
|||
# rotate error log time base, time format: Hour:Minute |
|||
# Hour from 0 to 23, Minute from 0 to 59 |
|||
# default value is 00:00 |
|||
# since V4.02 |
|||
error_log_rotate_time=00:00 |
|||
|
|||
# rotate error log when the log file exceeds this size |
|||
# 0 means never rotates log file by log file size |
|||
# default value is 0 |
|||
# since V4.02 |
|||
rotate_error_log_size = 0 |
|||
|
|||
# keep days of the log files |
|||
# 0 means do not delete old log files |
|||
# default value is 0 |
|||
log_file_keep_days = 0 |
|||
|
|||
# if use connection pool |
|||
# default value is false |
|||
# since V4.05 |
|||
use_connection_pool = false |
|||
|
|||
# connections whose the idle time exceeds this time will be closed |
|||
# unit: second |
|||
# default value is 3600 |
|||
# since V4.05 |
|||
connection_pool_max_idle_time = 3600 |
|||
|
|||
# HTTP port on this tracker server |
|||
http.server_port=8080 |
|||
|
|||
# check storage HTTP server alive interval seconds |
|||
# <= 0 for never check |
|||
# default value is 30 |
|||
http.check_alive_interval=30 |
|||
|
|||
# check storage HTTP server alive type, values are: |
|||
# tcp : connect to the storge server with HTTP port only, |
|||
# do not request and get response |
|||
# http: storage check alive url must return http status 200 |
|||
# default value is tcp |
|||
http.check_alive_type=tcp |
|||
|
|||
# check storage HTTP server alive uri/url |
|||
# NOTE: storage embed HTTP server support uri: /status.html |
|||
http.check_alive_uri=/status.html |
|||
|
@ -0,0 +1,26 @@ |
|||
#!/bin/bash
# Container entrypoint: patch the FastDFS configs with the host IP handed in
# via the FASTDFS_IPADDR environment variable, start the tracker, the storage
# daemon and nginx, then block forever so the container stays alive.

# Build-time placeholder baked into the config files; replaced at runtime.
placeholder="com.ikingtech.ch116221"
host_ip=$FASTDFS_IPADDR

for name in client storage mod_fastdfs; do
    # Substitute the placeholder with the real host address in each config.
    sed -i "s/$placeholder/$host_ip/g" "/etc/fdfs/$name.conf"
    # Keep a .txt snapshot of the patched config for easy inspection.
    cat "/etc/fdfs/$name.conf" > "/etc/fdfs/$name.txt"
done

# Park the stock nginx config and swap in the FastDFS-aware one.
mv /usr/local/nginx/conf/nginx.conf /usr/local/nginx/conf/nginx.conf.t
cp /etc/fdfs/nginx.conf /usr/local/nginx/conf

echo "start trackerd"
/etc/init.d/fdfs_trackerd start

echo "start storage"
/etc/init.d/fdfs_storaged start

echo "start nginx"
/usr/local/nginx/sbin/nginx

# The daemons all fork to the background; tail keeps PID 1 running.
tail -f /dev/null
@ -0,0 +1,47 @@ |
|||
version: '3.7' |
|||
services: |
|||
fastdfs-tracker: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data |
|||
command: tracker |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.30 |
|||
|
|||
fastdfs-storage: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data |
|||
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path |
|||
environment: |
|||
TRACKER_SERVER: 172.19.0.30:22122 |
|||
command: storage |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.31 |
|||
|
|||
fastdfs-nginx: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf |
|||
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path |
|||
environment: |
|||
TRACKER_SERVER: 172.19.0.30:22122 |
|||
command: nginx |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.32 |
|||
|
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,47 @@ |
|||
version: '3.7' |
|||
services: |
|||
fastdfs-tracker: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data |
|||
command: tracker |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.30 |
|||
|
|||
fastdfs-storage: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data |
|||
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path |
|||
environment: |
|||
TRACKER_SERVER: 172.19.0.30:22122 |
|||
command: storage |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.31 |
|||
|
|||
fastdfs-nginx: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf |
|||
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path |
|||
environment: |
|||
TRACKER_SERVER: 172.19.0.30:22122 |
|||
command: nginx |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.32 |
|||
|
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -0,0 +1,47 @@ |
|||
version: '3.7' |
|||
services: |
|||
fastdfs-tracker: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/tracker_data:/fastdfs/tracker/data |
|||
command: tracker |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.30 |
|||
|
|||
fastdfs-storage: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/storage_data:/fastdfs/storage/data |
|||
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path |
|||
environment: |
|||
TRACKER_SERVER: 172.19.0.30:22122 |
|||
command: storage |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.31 |
|||
|
|||
fastdfs-nginx: |
|||
image: season/fastdfs |
|||
restart: always |
|||
volumes: |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /mnt/epdc/fdfs/nginx/nginx.conf:/etc/nginx/conf/nginx.conf |
|||
- /mnt/epdc/fdfs/store_path:/fastdfs/store_path |
|||
environment: |
|||
TRACKER_SERVER: 172.19.0.30:22122 |
|||
command: nginx |
|||
networks: |
|||
epdc_network: |
|||
ipv4_address: 172.19.0.32 |
|||
|
|||
networks: |
|||
epdc_network: |
|||
external: true |
@ -1,14 +0,0 @@ |
|||
version: '3.7' |
|||
services: |
|||
web: |
|||
image: nginx |
|||
ports: |
|||
- 443:443 |
|||
volumes: |
|||
- /mnt/nginx/html:/usr/share/nginx/html |
|||
- /mnt/nginx/conf/nginx.conf:/etc/nginx/nginx.conf |
|||
- /mnt/nginx/conf.d:/etc/nginx/conf.d |
|||
- /mnt/nginx/logs:/var/log/nginx |
|||
restart: always |
|||
container_name: nginx_master |
|||
|
@ -1,50 +0,0 @@ |
|||
version: '3.7' |
|||
services: |
|||
epdc-events-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-events-server:prod |
|||
ports: |
|||
- "9066:9066" |
|||
epdc-gateway: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-gateway:prod |
|||
ports: |
|||
- "9094:9094" |
|||
epdc-auth: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-auth:prod |
|||
ports: |
|||
- "9056:9056" |
|||
epdc-admin-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-admin-server:prod |
|||
ports: |
|||
- "9055:9055" |
|||
epdc-oss-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-oss-server:prod |
|||
ports: |
|||
- "9065:9065" |
|||
epdc-api-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-api-server:prod |
|||
ports: |
|||
- "9040:9040" |
|||
epdc-news-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-news-server:prod |
|||
prort: |
|||
- "9064:9064" |
|||
epdc-user-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-user-server:prod |
|||
prort: |
|||
- "9068:9068" |
|||
epdc-websocket-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-websocket-server:prod |
|||
prort: |
|||
- "9988:9988" |
|||
epdc-kpi-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-kpi-server:prod |
|||
prort: |
|||
- "9987:9987" |
|||
epdc-group-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-group-server:prod |
|||
prort: |
|||
- "9063:9063" |
|||
epdc-message-server: |
|||
image: registry.cn-qingdao.aliyuncs.com/esua-epdc-shibei/epdc-message-server:prod |
|||
prort: |
|||
- "9062:9062" |
@ -0,0 +1,25 @@ |
|||
version: "3.7" |
|||
services: |
|||
nacos1: |
|||
image: nacos/nacos-server:latest |
|||
container_name: nacos1 |
|||
ports: |
|||
- 10001:10001 |
|||
volumes: |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /mnt/epdc/nacos/logs/nacos1:/home/nacos/logs |
|||
- /mnt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties |
|||
environment: |
|||
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip |
|||
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty |
|||
NACOS_SERVER_PORT: 10001 |
|||
NACOS_SERVER_IP: 172.16.0.53 #多网卡情况下,指定ip或网卡 |
|||
NACOS_SERVERS: 172.16.0.53:10001 172.16.0.51:10001 172.16.0.51:10002 #集群中其它节点[ip1:port ip2:port ip3:port] |
|||
MYSQL_SERVICE_HOST: 172.16.0.52 #mysql配置,Master为主节点,Slave为从节点 |
|||
MYSQL_SERVICE_PORT: 3306 |
|||
MYSQL_SERVICE_DB_NAME: esua_epdc_nacos |
|||
MYSQL_SERVICE_USER: epdc |
|||
MYSQL_SERVICE_PASSWORD: Elink@833066 |
|||
MYSQL_DATABASE_NUM: 2 |
|||
restart: always |
@ -0,0 +1,73 @@ |
|||
registry { |
|||
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa |
|||
type = "nacos" |
|||
|
|||
nacos { |
|||
serverAddr = "47.104.208.104:80" |
|||
namespace = "" |
|||
cluster = "default" |
|||
} |
|||
eureka { |
|||
serviceUrl = "http://localhost:8761/eureka" |
|||
application = "default" |
|||
weight = "1" |
|||
} |
|||
redis { |
|||
serverAddr = "localhost:6379" |
|||
db = "0" |
|||
} |
|||
zk { |
|||
cluster = "default" |
|||
serverAddr = "127.0.0.1:2181" |
|||
session.timeout = 6000 |
|||
connect.timeout = 2000 |
|||
} |
|||
consul { |
|||
cluster = "default" |
|||
serverAddr = "127.0.0.1:8500" |
|||
} |
|||
etcd3 { |
|||
cluster = "default" |
|||
serverAddr = "http://localhost:2379" |
|||
} |
|||
sofa { |
|||
serverAddr = "127.0.0.1:9603" |
|||
application = "default" |
|||
region = "DEFAULT_ZONE" |
|||
datacenter = "DefaultDataCenter" |
|||
cluster = "default" |
|||
group = "SEATA_GROUP" |
|||
addressWaitTime = "3000" |
|||
} |
|||
file { |
|||
name = "file.conf" |
|||
} |
|||
} |
|||
|
|||
config { |
|||
# file、nacos 、apollo、zk、consul、etcd3 |
|||
type = "nacos" |
|||
|
|||
nacos { |
|||
serverAddr = "47.104.208.104:80" |
|||
namespace = "" |
|||
} |
|||
consul { |
|||
serverAddr = "127.0.0.1:8500" |
|||
} |
|||
apollo { |
|||
app.id = "seata-server" |
|||
apollo.meta = "http://192.168.1.204:8801" |
|||
} |
|||
zk { |
|||
serverAddr = "127.0.0.1:2181" |
|||
session.timeout = 6000 |
|||
connect.timeout = 2000 |
|||
} |
|||
etcd3 { |
|||
serverAddr = "http://localhost:2379" |
|||
} |
|||
file { |
|||
name = "file.conf" |
|||
} |
|||
} |
@ -0,0 +1,39 @@ |
|||
#环境变量 |
|||
#seata-server 支持以下环境变量: |
|||
# |
|||
#SEATA_IP |
|||
#可选, 指定seata-server启动的IP, 该IP用于向注册中心注册时使用, 如eureka等 |
|||
# |
|||
#SEATA_PORT |
|||
#可选, 指定seata-server启动的端口, 默认为 8091 |
|||
# |
|||
#STORE_MODE |
|||
#可选, 指定seata-server的事务日志存储方式, 支持db 和 file, 默认是 file |
|||
# |
|||
#SERVER_NODE |
|||
#可选, 用于指定seata-server节点ID, 如 1,2,3..., 默认为 1 |
|||
# |
|||
#SEATA_ENV |
|||
#可选, 指定 seata-server 运行环境, 如 dev, test 等, 服务启动时会使用 registry-dev.conf 这样的配置 |
|||
# |
|||
#SEATA_CONFIG_NAME |
|||
#可选, 指定配置文件位置, 如 file:/root/registry, 将会加载 /root/registry.conf 作为配置文件 |
|||
version: "3.7" |
|||
|
|||
services: |
|||
seata-server1: |
|||
container_name: seata-server1 |
|||
image: seataio/seata-server:latest |
|||
ports: |
|||
- 9608:9608 |
|||
environment: |
|||
SEATA_IP: 47.104.85.99 |
|||
SEATA_PORT: 9608 |
|||
STORE_MODE: db |
|||
SERVER_NODE: 1 |
|||
SEATA_CONFIG_NAME: file:/root/seata-config/registry |
|||
volumes: |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /opt/epdc/seata/seata-config:/root/seata-config |
|||
- /opt/epdc/seata/logs:/root/logs/seata |
@ -0,0 +1,84 @@ |
|||
transport.type=TCP |
|||
transport.server=NIO |
|||
transport.heartbeat=true |
|||
transport.enableClientBatchSendRequest=false |
|||
transport.threadFactory.bossThreadPrefix=NettyBoss |
|||
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker |
|||
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler |
|||
transport.threadFactory.shareBossWorker=false |
|||
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector |
|||
transport.threadFactory.clientSelectorThread-size=1 |
|||
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread |
|||
transport.threadFactory.bossThreadSize=1 |
|||
transport.threadFactory.workerThreadSize=8 |
|||
transport.shutdown.wait=3 |
|||
service.vgroup_mapping.my_test_tx_group=default |
|||
service.vgroup_mapping.epdc-api-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-demo-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-user-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-services-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-party-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-heart-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-neighbor-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-oss-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-message-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-news-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-job-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-admin-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-activiti-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-kpi-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-points-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-webservice-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-events-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-custom-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-analysis-server-fescar-service-group=default |
|||
service.vgroup_mapping.epdc-group-server-fescar-service-group=default |
|||
service.enableDegrade=false |
|||
service.disableGlobalTransaction=false |
|||
client.rm.async.commit.buffer.limit=10000 |
|||
client.rm.lock.retry.internal=10 |
|||
client.rm.lock.retry.times=30 |
|||
client.rm.report.retry.count=5 |
|||
client.rm.lock.retry.policy.branch-rollback-on-conflict=true |
|||
client.rm.table.meta.check.enable=false |
|||
client.rm.report.success.enable=true |
|||
client.tm.commit.retry.count=5 |
|||
client.tm.rollback.retry.count=5 |
|||
store.mode=db |
|||
store.file.dir=file_store/data |
|||
store.file.maxBranchSessionSize=16384 |
|||
store.file.maxGlobalSessionSize=512 |
|||
store.file.fileWriteBufferCacheSize=16384 |
|||
store.file.flushDiskMode=async |
|||
store.file.session.reload.read_size=100 |
|||
store.db.datasource=dbcp |
|||
store.db.dbType=mysql |
|||
store.db.driverClassName=com.mysql.jdbc.Driver |
|||
store.db.url=jdbc:mysql://172.31.171.61:9600/epdc_seata?useUnicode=true |
|||
store.db.user=seata |
|||
store.db.password=elink888 |
|||
store.db.minConn=1 |
|||
store.db.maxConn=3 |
|||
store.db.global.table=global_table |
|||
store.db.branch.table=branch_table |
|||
store.db.queryLimit=100 |
|||
store.db.lockTable=lock_table |
|||
server.recovery.committingRetryPeriod=1000 |
|||
server.recovery.asynCommittingRetryPeriod=1000 |
|||
server.recovery.rollbackingRetryPeriod=1000 |
|||
server.recovery.timeoutRetryPeriod=1000 |
|||
server.max.commit.retry.timeout=-1 |
|||
server.max.rollback.retry.timeout=-1 |
|||
server.rollback.retry.timeout.unlock.enable=false |
|||
client.undo.data.validation=true |
|||
client.undo.log.serialization=jackson |
|||
server.undo.log.save.days=7 |
|||
server.undo.log.delete.period=86400000 |
|||
client.undo.log.table=undo_log |
|||
client.log.exceptionRate=100 |
|||
transport.serialization=seata |
|||
transport.compressor=none |
|||
metrics.enabled=false |
|||
metrics.registryType=compact |
|||
metrics.exporterList=prometheus |
|||
metrics.exporterPrometheusPort=9898 |
@ -0,0 +1,89 @@ |
|||
#!/usr/bin/env bash |
|||
# Copyright 1999-2019 Seata.io Group. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at、 |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
while getopts ":h:p:g:t:" opt |
|||
do |
|||
case $opt in |
|||
h) |
|||
host=$OPTARG |
|||
;; |
|||
p) |
|||
port=$OPTARG |
|||
;; |
|||
g) |
|||
group=$OPTARG |
|||
;; |
|||
t) |
|||
tenant=$OPTARG |
|||
;; |
|||
?) |
|||
echo "\033[31m USAGE OPTION: $0 [-h host] [-p port] [-g group] [-t tenant] \033[0m" |
|||
exit 1 |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
if [[ -z ${host} ]]; then |
|||
host=localhost |
|||
fi |
|||
if [[ -z ${port} ]]; then |
|||
port=8848 |
|||
fi |
|||
if [[ -z ${group} ]]; then |
|||
group="SEATA_GROUP" |
|||
fi |
|||
if [[ -z ${tenant} ]]; then |
|||
tenant="" |
|||
fi |
|||
|
|||
nacosAddr=$host:$port |
|||
contentType="content-type:application/json;charset=UTF-8" |
|||
|
|||
echo "set nacosAddr=$nacosAddr" |
|||
echo "set group=$group" |
|||
|
|||
failCount=0 |
|||
tempLog=$(mktemp -u) |
|||
function addConfig() { |
|||
curl -X POST -H "${1}" "http://$2/nacos/v1/cs/configs?dataId=$3&group=$group&content=$4&tenant=$tenant" >"${tempLog}" 2>/dev/null |
|||
if [[ -z $(cat "${tempLog}") ]]; then |
|||
echo "\033[31m Please check the cluster status. \033[0m" |
|||
exit 1 |
|||
fi |
|||
if [[ $(cat "${tempLog}") =~ "true" ]]; then |
|||
echo "Set $3=$4\033[32m successfully \033[0m" |
|||
else |
|||
echo "Set $3=$4\033[31m failure \033[0m" |
|||
(( failCount++ )) |
|||
fi |
|||
} |
|||
|
|||
count=0 |
|||
for line in $(cat $(dirname "$PWD")/config.txt); do |
|||
(( count++ )) |
|||
key=${line%%=*} |
|||
value=${line#*=} |
|||
addConfig "${contentType}" "${nacosAddr}" "${key}" "${value}" |
|||
done |
|||
|
|||
echo "=========================================================================" |
|||
echo " Complete initialization parameters, \033[32m total-count:$count \033[0m, \033[31m failure-count:$failCount \033[0m" |
|||
echo "=========================================================================" |
|||
|
|||
if [[ ${failCount} -eq 0 ]]; then |
|||
echo "\033[32m Init nacos config finished, please start seata-server. \033[0m" |
|||
else |
|||
echo "\033[31m init nacos config fail. \033[0m" |
|||
fi |
@ -0,0 +1,48 @@ |
|||
version: "3.7" |
|||
services: |
|||
nacos2: |
|||
image: nacos/nacos-server:latest |
|||
container_name: nacos2 |
|||
ports: |
|||
- 10001:10001 |
|||
volumes: |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /opt/epdc/nacos/logs/nacos2:/home/nacos/logs |
|||
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties |
|||
environment: |
|||
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip |
|||
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty |
|||
NACOS_SERVER_PORT: 9602 |
|||
NACOS_SERVER_IP: 172.31.171.61 #多网卡情况下,指定ip或网卡 |
|||
NACOS_SERVERS: 172.31.171.61:9601 172.31.171.61:9602 172.31.171.62:9601 #集群中其它节点[ip1:port ip2:port ip3:port] |
|||
MYSQL_SERVICE_HOST: 172.31.171.61 #mysql配置,Master为主节点,Slave为从节点 |
|||
MYSQL_SERVICE_PORT: 9600 |
|||
MYSQL_SERVICE_DB_NAME: epdc_nacos |
|||
MYSQL_SERVICE_USER: nacos |
|||
MYSQL_SERVICE_PASSWORD: elink888 |
|||
MYSQL_DATABASE_NUM: 2 |
|||
restart: always |
|||
nacos3: |
|||
image: nacos/nacos-server:latest |
|||
container_name: nacos3 |
|||
ports: |
|||
- 9601:9601 |
|||
volumes: |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /etc/timezone:/etc/timezone:ro |
|||
- /opt/epdc/nacos/logs:/home/nacos/logs |
|||
- /opt/epdc/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties |
|||
environment: |
|||
PREFER_HOST_MODE: ip #如果支持主机名可以使用hostname,否则使用ip,默认也是ip |
|||
SPRING_DATASOURCE_PLATFORM: mysql #数据源平台 仅支持mysql或不保存empty |
|||
NACOS_SERVER_PORT: 9601 |
|||
NACOS_SERVER_IP: 172.31.171.62 #多网卡情况下,指定ip或网卡 |
|||
NACOS_SERVERS: 172.31.171.61:9601 172.31.171.61:9602 172.31.171.62:9601 #集群中其它节点[ip1:port ip2:port ip3:port] |
|||
MYSQL_SERVICE_HOST: 172.31.171.61 #mysql配置,Master为主节点,Slave为从节点 |
|||
MYSQL_SERVICE_PORT: 9600 |
|||
MYSQL_SERVICE_DB_NAME: epdc_nacos |
|||
MYSQL_SERVICE_USER: nacos |
|||
MYSQL_SERVICE_PASSWORD: elink888 |
|||
MYSQL_DATABASE_NUM: 2 |
|||
restart: always |
@ -0,0 +1,73 @@ |
|||
registry { |
|||
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa |
|||
type = "nacos" |
|||
|
|||
nacos { |
|||
serverAddr = "172.31.171.61:80" |
|||
namespace = "" |
|||
cluster = "default" |
|||
} |
|||
eureka { |
|||
serviceUrl = "http://localhost:8761/eureka" |
|||
application = "default" |
|||
weight = "1" |
|||
} |
|||
redis { |
|||
serverAddr = "localhost:6379" |
|||
db = "0" |
|||
} |
|||
zk { |
|||
cluster = "default" |
|||
serverAddr = "127.0.0.1:2181" |
|||
session.timeout = 6000 |
|||
connect.timeout = 2000 |
|||
} |
|||
consul { |
|||
cluster = "default" |
|||
serverAddr = "127.0.0.1:8500" |
|||
} |
|||
etcd3 { |
|||
cluster = "default" |
|||
serverAddr = "http://localhost:2379" |
|||
} |
|||
sofa { |
|||
serverAddr = "127.0.0.1:9603" |
|||
application = "default" |
|||
region = "DEFAULT_ZONE" |
|||
datacenter = "DefaultDataCenter" |
|||
cluster = "default" |
|||
group = "SEATA_GROUP" |
|||
addressWaitTime = "3000" |
|||
} |
|||
file { |
|||
name = "file.conf" |
|||
} |
|||
} |
|||
|
|||
config { |
|||
# file、nacos 、apollo、zk、consul、etcd3 |
|||
type = "nacos" |
|||
|
|||
nacos { |
|||
serverAddr = "172.31.171.61:80" |
|||
namespace = "" |
|||
} |
|||
consul { |
|||
serverAddr = "127.0.0.1:8500" |
|||
} |
|||
apollo { |
|||
app.id = "seata-server" |
|||
apollo.meta = "http://192.168.1.204:8801" |
|||
} |
|||
zk { |
|||
serverAddr = "127.0.0.1:2181" |
|||
session.timeout = 6000 |
|||
connect.timeout = 2000 |
|||
} |
|||
etcd3 { |
|||
serverAddr = "http://localhost:2379" |
|||
} |
|||
file { |
|||
name = "file.conf" |
|||
} |
|||
} |
@ -0,0 +1,36 @@ |
|||
package com.elink.esua.epdc.commons.mybatis.entity; |
|||
|
|||
import lombok.Data; |
|||
|
|||
import java.io.Serializable; |
|||
|
|||
/** |
|||
* 部门冗余字段基类 |
|||
* |
|||
* @author rongchao |
|||
* @Date 19-12-18 |
|||
*/ |
|||
@Data |
|||
public abstract class DeptScope extends BaseEpdcEntity implements Serializable { |
|||
|
|||
/*** |
|||
*所有部门名称 |
|||
*/ |
|||
private String allDeptNames; |
|||
|
|||
/*** |
|||
*所有部门ID |
|||
*/ |
|||
private String allDeptIds; |
|||
|
|||
/*** |
|||
*父所有部门 |
|||
*/ |
|||
private String parentDeptNames; |
|||
|
|||
/*** |
|||
*父所有部门 |
|||
*/ |
|||
private String parentDeptIds; |
|||
|
|||
} |
@ -0,0 +1,74 @@ |
|||
package com.elink.esua.epdc.commons.mybatis.utils; |
|||
|
|||
import com.elink.esua.epdc.commons.mybatis.entity.DeptScope; |
|||
import lombok.Data; |
|||
|
|||
/** |
|||
* 部门信息实体工具类 |
|||
* |
|||
* @author rongchao |
|||
* @Date 19-12-18 |
|||
*/ |
|||
public class DeptEntityUtils { |
|||
|
|||
@Data |
|||
public static class DeptDto { |
|||
/** |
|||
* 父所有部门ID |
|||
*/ |
|||
private String parentDeptIds; |
|||
/** |
|||
* 父所有部门 |
|||
*/ |
|||
private String parentDeptNames; |
|||
/** |
|||
* 所有部门ID |
|||
*/ |
|||
private String allDeptIds; |
|||
/** |
|||
* 所有部门 |
|||
*/ |
|||
private String allDeptNames; |
|||
} |
|||
|
|||
/** |
|||
* 装载部门信息 |
|||
* |
|||
* @param dto |
|||
* @param entityClass |
|||
* @return T |
|||
* @author rongchao |
|||
* @since 2019-12-18 |
|||
*/ |
|||
public static <T extends DeptScope> T loadDeptInfo(DeptDto dto, Class<T> entityClass) { |
|||
try { |
|||
T t = entityClass.newInstance(); |
|||
t.setAllDeptIds(dto.getAllDeptIds()); |
|||
t.setAllDeptNames(dto.getAllDeptNames()); |
|||
t.setParentDeptIds(dto.getParentDeptIds()); |
|||
t.setParentDeptNames(dto.getParentDeptNames()); |
|||
return t; |
|||
} catch (InstantiationException e) { |
|||
e.printStackTrace(); |
|||
} catch (IllegalAccessException e) { |
|||
e.printStackTrace(); |
|||
} |
|||
return null; |
|||
} |
|||
|
|||
/** |
|||
* 装载部门信息 |
|||
* |
|||
* @param dto |
|||
* @param entity |
|||
* @return void |
|||
* @author rongchao |
|||
* @since 2019-12-18 |
|||
*/ |
|||
public static <T extends DeptScope> void loadDeptInfo(DeptDto dto, T entity) { |
|||
entity.setAllDeptIds(dto.getAllDeptIds()); |
|||
entity.setAllDeptNames(dto.getAllDeptNames()); |
|||
entity.setParentDeptIds(dto.getParentDeptIds()); |
|||
entity.setParentDeptNames(dto.getParentDeptNames()); |
|||
} |
|||
} |
@ -0,0 +1,12 @@ |
|||
package com.elink.esua.epdc.commons.tools.constant; |
|||
|
|||
/** |
|||
* Nacos配置中心相关常量 |
|||
* |
|||
* @author rongchao |
|||
* @Date 20-1-15 |
|||
*/ |
|||
public interface NacosConfigConstant { |
|||
|
|||
String CONFIG_GROUP = "EPDC_CONFIG_GROUP"; |
|||
} |
@ -0,0 +1,13 @@ |
|||
package com.elink.esua.epdc.commons.tools.constant; |
|||
|
|||
/** |
|||
* @Auther: yinzuomei |
|||
* @Date: 2019/12/16 19:38 |
|||
* @Description: 积分用 |
|||
*/ |
|||
public interface PointsConstant { |
|||
/** |
|||
* 手动调整积分编码 |
|||
*/ |
|||
String ruleCode ="hand_regulation"; |
|||
} |
@ -0,0 +1,35 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums; |
|||
|
|||
/** |
|||
* 用户认证类别枚举类 |
|||
* |
|||
* @author rongchao |
|||
* @Date 19-12-19 |
|||
*/ |
|||
public enum UserAuthTypeEnum { |
|||
|
|||
/** |
|||
* 居民认证 |
|||
*/ |
|||
RESIDENT_AUTH("0"), |
|||
|
|||
/** |
|||
* 党员认证 |
|||
*/ |
|||
PARTY_AUTH("1"), |
|||
|
|||
/** |
|||
* 志愿者认证 |
|||
*/ |
|||
VOLUNTEER_AUTH("2"); |
|||
|
|||
private String value; |
|||
|
|||
UserAuthTypeEnum(String value) { |
|||
this.value = value; |
|||
} |
|||
|
|||
public String value() { |
|||
return this.value; |
|||
} |
|||
} |
@ -0,0 +1,28 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums; |
|||
|
|||
/** |
|||
* @Author: yinzuomei |
|||
* @Date: 2019/12/17 19:11 |
|||
* @Description: 用户身份枚举类 |
|||
*/ |
|||
public enum UserTagEnum { |
|||
/** |
|||
* 党员 |
|||
*/ |
|||
PARTY_MEMBER("partymember"), |
|||
|
|||
/** |
|||
* 志愿者 |
|||
*/ |
|||
VOLUNTEER("volunteer"); |
|||
|
|||
private String value; |
|||
|
|||
UserTagEnum(String value) { |
|||
this.value = value; |
|||
} |
|||
|
|||
public String value() { |
|||
return this.value; |
|||
} |
|||
} |
@ -0,0 +1,40 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums.pointsenum; |
|||
|
|||
/** |
|||
* @Auther: yinzuomei |
|||
* @Date: 2020/2/6 14:01 |
|||
* @Description: 积分行为编码 |
|||
*/ |
|||
public enum PointsBehaviorCodeEnum { |
|||
LIKE("like", "赞"), |
|||
DISLIKE("dislike", "踩"), |
|||
SHARE("share", "分享"), |
|||
CLOCK("clock", "打卡"), |
|||
COMMENT("comment", "评论"), |
|||
BREAK_PROMISE("break_promise", "爽约"), |
|||
JOIN_ACT("join_act", "活动积分"); |
|||
|
|||
private String behaviorCode; |
|||
private String name; |
|||
|
|||
PointsBehaviorCodeEnum(String behaviorCode, String name) { |
|||
this.behaviorCode = behaviorCode; |
|||
this.name = name; |
|||
} |
|||
|
|||
public String getBehaviorCode() { |
|||
return behaviorCode; |
|||
} |
|||
|
|||
public void setBehaviorCode(String behaviorCode) { |
|||
this.behaviorCode = behaviorCode; |
|||
} |
|||
|
|||
public String getName() { |
|||
return name; |
|||
} |
|||
|
|||
public void setName(String name) { |
|||
this.name = name; |
|||
} |
|||
} |
@ -0,0 +1,27 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums.pointsenum; |
|||
|
|||
/** |
|||
* @Auther: yinzuomei |
|||
* @Date: 2019/12/13 09:43 |
|||
* @Description: 积分规则限制时限枚举类 |
|||
*/ |
|||
public enum PointsLimitTimeEnum { |
|||
/** |
|||
* 限制时限(0-分钟,1-小时,2-日,3-月,4-年) |
|||
*/ |
|||
LIMIT_TIME_MINUTE("0"), |
|||
LIMIT_TIME_HOUR("1"), |
|||
LIMIT_TIME_DAY("2"), |
|||
LIMIT_TIME_MONTH("3"), |
|||
LIMIT_TIME_YEAR("4"); |
|||
|
|||
private String value; |
|||
|
|||
PointsLimitTimeEnum(String value) { |
|||
this.value = value; |
|||
} |
|||
|
|||
public String value() { |
|||
return value; |
|||
} |
|||
} |
@ -0,0 +1,24 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums.pointsenum; |
|||
|
|||
/** |
|||
* @Auther: yinzuomei |
|||
* @Date: 2019/12/13 09:31 |
|||
* @Description: 积分操作类型枚举类 |
|||
*/ |
|||
public enum PointsOperationEnum { |
|||
/** |
|||
* 规则操作类型(0-减积分,1-加积分) |
|||
*/ |
|||
OPERATION_TYPE_ADD("1"), |
|||
OPERATION_TYPE_SUBSTRACT("0"); |
|||
|
|||
private String operationType; |
|||
|
|||
PointsOperationEnum(String operationType) { |
|||
this.operationType = operationType; |
|||
} |
|||
|
|||
public String getOperationType() { |
|||
return operationType; |
|||
} |
|||
} |
@ -0,0 +1,34 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums.pointsenum; |
|||
|
|||
/** |
|||
* @Auther: yinzuomei |
|||
* @Date: 2019/12/13 09:33 |
|||
* @Description: 积分操作方式枚举类 |
|||
*/ |
|||
public enum PointsOperationModeEnum { |
|||
|
|||
/** |
|||
* user-用户操作 |
|||
*/ |
|||
OPERATION_MODE_USER("user"), |
|||
|
|||
/** |
|||
* admin-管理员操作 |
|||
*/ |
|||
OPERATION_MODE_ADMIN("admin"), |
|||
|
|||
/** |
|||
* sys-系统操作 |
|||
*/ |
|||
OPERATION_MODE_SYS("sys"); |
|||
|
|||
private String operationMode; |
|||
|
|||
PointsOperationModeEnum(String operationMode) { |
|||
this.operationMode = operationMode; |
|||
} |
|||
|
|||
public String getOperationMode() { |
|||
return operationMode; |
|||
} |
|||
} |
@ -0,0 +1,26 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums.pointsenum; |
|||
|
|||
/** |
|||
* @Auther: yinzuomei |
|||
* @Date: 2019/12/12 15:04 |
|||
* @Description: 积分规则可用标志枚举类 |
|||
*/ |
|||
public enum PointsRuleAvailableEnum { |
|||
|
|||
|
|||
/** |
|||
* 可用标记(0-不可用,1-可用) |
|||
*/ |
|||
AVAILABLE_TRUE("1"), |
|||
AVAILABLE_FALSE("0"); |
|||
|
|||
private String value; |
|||
|
|||
PointsRuleAvailableEnum(String value) { |
|||
this.value = value; |
|||
} |
|||
|
|||
public String value() { |
|||
return value; |
|||
} |
|||
} |
@ -0,0 +1,36 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums.pointsenum; |
|||
|
|||
/** |
|||
* @Auther: yinzuomei |
|||
* @Date: 2020/2/6 14:00 |
|||
* @Description: 积分规则编码 |
|||
*/ |
|||
public enum PointsRuleCodeEnum { |
|||
CANCEL_ACT("cancel_act", "取消报名系统扣减积分"), |
|||
CONFIRM_JOIN_ACT("confirm_join_act", "参与活动确认积分"); |
|||
|
|||
private String ruleCode; |
|||
private String name; |
|||
|
|||
PointsRuleCodeEnum(String ruleCode, String name) { |
|||
this.ruleCode = ruleCode; |
|||
this.name = name; |
|||
} |
|||
|
|||
public String getRuleCode() { |
|||
return ruleCode; |
|||
} |
|||
|
|||
public void setRuleCode(String ruleCode) { |
|||
this.ruleCode = ruleCode; |
|||
} |
|||
|
|||
public String getName() { |
|||
return name; |
|||
} |
|||
|
|||
public void setName(String name) { |
|||
this.name = name; |
|||
} |
|||
} |
|||
|
@ -0,0 +1,28 @@ |
|||
package com.elink.esua.epdc.commons.tools.enums.pointsenum; |
|||
|
|||
/** |
|||
* @Auther: yinzuomei |
|||
* @Date: 2019/12/13 09:48 |
|||
* @Description: 积分是否有上限限制 枚举类 |
|||
*/ |
|||
public enum PointsUpperLimitEnum { |
|||
/** |
|||
* 是 |
|||
*/ |
|||
YES("1"), |
|||
|
|||
/** |
|||
* 否 |
|||
*/ |
|||
NO("0"); |
|||
|
|||
private String value; |
|||
|
|||
PointsUpperLimitEnum(String value) { |
|||
this.value = value; |
|||
} |
|||
|
|||
public String value() { |
|||
return value; |
|||
} |
|||
} |
@ -1,146 +1,155 @@ |
|||
package com.elink.esua.epdc.commons.tools.utils; |
|||
|
|||
/** |
|||
* @Description TODO |
|||
* @Description |
|||
* @Author yinzuomei |
|||
* @Date 2019/12/27 10:00 |
|||
*/ |
|||
public class GPSUtils { |
|||
public static double pi = 3.1415926535897932384626; |
|||
public static double x_pi = 3.14159265358979324 * 3000.0 / 180.0; |
|||
public static double a = 6378245.0; |
|||
public static double ee = 0.00669342162296594323; |
|||
public static double pi = 3.1415926535897932384626; |
|||
public static double x_pi = 3.14159265358979324 * 3000.0 / 180.0; |
|||
public static double a = 6378245.0; |
|||
public static double ee = 0.00669342162296594323; |
|||
|
|||
public static double transformLat(double x, double y) { |
|||
double ret = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y |
|||
+ 0.2 * Math.sqrt(Math.abs(x)); |
|||
ret += (20.0 * Math.sin(6.0 * x * pi) + 20.0 * Math.sin(2.0 * x * pi)) * 2.0 / 3.0; |
|||
ret += (20.0 * Math.sin(y * pi) + 40.0 * Math.sin(y / 3.0 * pi)) * 2.0 / 3.0; |
|||
ret += (160.0 * Math.sin(y / 12.0 * pi) + 320 * Math.sin(y * pi / 30.0)) * 2.0 / 3.0; |
|||
return ret; |
|||
} |
|||
public static double transformLat(double x, double y) { |
|||
double ret = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y |
|||
+ 0.2 * Math.sqrt(Math.abs(x)); |
|||
ret += (20.0 * Math.sin(6.0 * x * pi) + 20.0 * Math.sin(2.0 * x * pi)) * 2.0 / 3.0; |
|||
ret += (20.0 * Math.sin(y * pi) + 40.0 * Math.sin(y / 3.0 * pi)) * 2.0 / 3.0; |
|||
ret += (160.0 * Math.sin(y / 12.0 * pi) + 320 * Math.sin(y * pi / 30.0)) * 2.0 / 3.0; |
|||
return ret; |
|||
} |
|||
|
|||
public static double transformLon(double x, double y) { |
|||
double ret = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1 |
|||
* Math.sqrt(Math.abs(x)); |
|||
ret += (20.0 * Math.sin(6.0 * x * pi) + 20.0 * Math.sin(2.0 * x * pi)) * 2.0 / 3.0; |
|||
ret += (20.0 * Math.sin(x * pi) + 40.0 * Math.sin(x / 3.0 * pi)) * 2.0 / 3.0; |
|||
ret += (150.0 * Math.sin(x / 12.0 * pi) + 300.0 * Math.sin(x / 30.0 |
|||
* pi)) * 2.0 / 3.0; |
|||
return ret; |
|||
} |
|||
public static double[] transform(double lat, double lon) { |
|||
if (outOfChina(lat, lon)) { |
|||
return new double[]{lat,lon}; |
|||
} |
|||
double dLat = transformLat(lon - 105.0, lat - 35.0); |
|||
double dLon = transformLon(lon - 105.0, lat - 35.0); |
|||
double radLat = lat / 180.0 * pi; |
|||
double magic = Math.sin(radLat); |
|||
magic = 1 - ee * magic * magic; |
|||
double sqrtMagic = Math.sqrt(magic); |
|||
dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi); |
|||
dLon = (dLon * 180.0) / (a / sqrtMagic * Math.cos(radLat) * pi); |
|||
double mgLat = lat + dLat; |
|||
double mgLon = lon + dLon; |
|||
return new double[]{mgLat,mgLon}; |
|||
} |
|||
public static boolean outOfChina(double lat, double lon) { |
|||
if (lon < 72.004 || lon > 137.8347) |
|||
return true; |
|||
if (lat < 0.8293 || lat > 55.8271) |
|||
return true; |
|||
return false; |
|||
} |
|||
/** |
|||
* 84 to 火星坐标系 (GCJ-02) World Geodetic System ==> Mars Geodetic System |
|||
* |
|||
* @param lat |
|||
* @param lon |
|||
* @return |
|||
*/ |
|||
public static double[] gps84_To_Gcj02(double lat, double lon) { |
|||
if (outOfChina(lat, lon)) { |
|||
return new double[]{lat,lon}; |
|||
} |
|||
double dLat = transformLat(lon - 105.0, lat - 35.0); |
|||
double dLon = transformLon(lon - 105.0, lat - 35.0); |
|||
double radLat = lat / 180.0 * pi; |
|||
double magic = Math.sin(radLat); |
|||
magic = 1 - ee * magic * magic; |
|||
double sqrtMagic = Math.sqrt(magic); |
|||
dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi); |
|||
dLon = (dLon * 180.0) / (a / sqrtMagic * Math.cos(radLat) * pi); |
|||
double mgLat = lat + dLat; |
|||
double mgLon = lon + dLon; |
|||
return new double[]{mgLat, mgLon}; |
|||
} |
|||
public static double transformLon(double x, double y) { |
|||
double ret = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1 |
|||
* Math.sqrt(Math.abs(x)); |
|||
ret += (20.0 * Math.sin(6.0 * x * pi) + 20.0 * Math.sin(2.0 * x * pi)) * 2.0 / 3.0; |
|||
ret += (20.0 * Math.sin(x * pi) + 40.0 * Math.sin(x / 3.0 * pi)) * 2.0 / 3.0; |
|||
ret += (150.0 * Math.sin(x / 12.0 * pi) + 300.0 * Math.sin(x / 30.0 |
|||
* pi)) * 2.0 / 3.0; |
|||
return ret; |
|||
} |
|||
|
|||
/** |
|||
* * 火星坐标系 (GCJ-02) to 84 * * @param lon * @param lat * @return |
|||
* */ |
|||
public static double[] gcj02_To_Gps84(double lat, double lon) { |
|||
double[] gps = transform(lat, lon); |
|||
double lontitude = lon * 2 - gps[1]; |
|||
double latitude = lat * 2 - gps[0]; |
|||
return new double[]{latitude, lontitude}; |
|||
} |
|||
/** |
|||
* 火星坐标系 (GCJ-02) 与百度坐标系 (BD-09) 的转换算法 将 GCJ-02 坐标转换成 BD-09 坐标 |
|||
* |
|||
* @param lat |
|||
* @param lon |
|||
*/ |
|||
public static double[] gcj02_To_Bd09(double lat, double lon) { |
|||
double x = lon, y = lat; |
|||
double z = Math.sqrt(x * x + y * y) + 0.00002 * Math.sin(y * x_pi); |
|||
double theta = Math.atan2(y, x) + 0.000003 * Math.cos(x * x_pi); |
|||
double tempLon = z * Math.cos(theta) + 0.0065; |
|||
double tempLat = z * Math.sin(theta) + 0.006; |
|||
double[] gps = {tempLat,tempLon}; |
|||
return gps; |
|||
} |
|||
public static double[] transform(double lat, double lon) { |
|||
if (outOfChina(lat, lon)) { |
|||
return new double[]{lat, lon}; |
|||
} |
|||
double dLat = transformLat(lon - 105.0, lat - 35.0); |
|||
double dLon = transformLon(lon - 105.0, lat - 35.0); |
|||
double radLat = lat / 180.0 * pi; |
|||
double magic = Math.sin(radLat); |
|||
magic = 1 - ee * magic * magic; |
|||
double sqrtMagic = Math.sqrt(magic); |
|||
dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi); |
|||
dLon = (dLon * 180.0) / (a / sqrtMagic * Math.cos(radLat) * pi); |
|||
double mgLat = lat + dLat; |
|||
double mgLon = lon + dLon; |
|||
return new double[]{mgLat, mgLon}; |
|||
} |
|||
|
|||
/** |
|||
* * 火星坐标系 (GCJ-02) 与百度坐标系 (BD-09) 的转换算法 * * 将 BD-09 坐标转换成GCJ-02 坐标 * * @param |
|||
* bd_lat * @param bd_lon * @return |
|||
*/ |
|||
public static double[] bd09_To_Gcj02(double lat, double lon) { |
|||
double x = lon - 0.0065, y = lat - 0.006; |
|||
double z = Math.sqrt(x * x + y * y) - 0.00002 * Math.sin(y * x_pi); |
|||
double theta = Math.atan2(y, x) - 0.000003 * Math.cos(x * x_pi); |
|||
double tempLon = z * Math.cos(theta); |
|||
double tempLat = z * Math.sin(theta); |
|||
double[] gps = {tempLat,tempLon}; |
|||
return gps; |
|||
} |
|||
public static boolean outOfChina(double lat, double lon) { |
|||
if (lon < 72.004 || lon > 137.8347) |
|||
return true; |
|||
if (lat < 0.8293 || lat > 55.8271) |
|||
return true; |
|||
return false; |
|||
} |
|||
|
|||
/**将gps84转为bd09 |
|||
* @param lat |
|||
* @param lon |
|||
* @return |
|||
*/ |
|||
public static double[] gps84_To_bd09(double lat,double lon){ |
|||
double[] gcj02 = gps84_To_Gcj02(lat,lon); |
|||
double[] bd09 = gcj02_To_Bd09(gcj02[0],gcj02[1]); |
|||
return bd09; |
|||
} |
|||
public static double[] bd09_To_gps84(double lat,double lon){ |
|||
double[] gcj02 = bd09_To_Gcj02(lat, lon); |
|||
double[] gps84 = gcj02_To_Gps84(gcj02[0], gcj02[1]); |
|||
//保留小数点后六位
|
|||
gps84[0] = retain6(gps84[0]); |
|||
gps84[1] = retain6(gps84[1]); |
|||
return gps84; |
|||
} |
|||
/** |
|||
* 84 to 火星坐标系 (GCJ-02) World Geodetic System ==> Mars Geodetic System |
|||
* |
|||
* @param lat |
|||
* @param lon |
|||
* @return |
|||
*/ |
|||
public static double[] gps84_To_Gcj02(double lat, double lon) { |
|||
if (outOfChina(lat, lon)) { |
|||
return new double[]{lat, lon}; |
|||
} |
|||
double dLat = transformLat(lon - 105.0, lat - 35.0); |
|||
double dLon = transformLon(lon - 105.0, lat - 35.0); |
|||
double radLat = lat / 180.0 * pi; |
|||
double magic = Math.sin(radLat); |
|||
magic = 1 - ee * magic * magic; |
|||
double sqrtMagic = Math.sqrt(magic); |
|||
dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi); |
|||
dLon = (dLon * 180.0) / (a / sqrtMagic * Math.cos(radLat) * pi); |
|||
double mgLat = lat + dLat; |
|||
double mgLon = lon + dLon; |
|||
return new double[]{mgLat, mgLon}; |
|||
} |
|||
|
|||
/**保留小数点后六位 |
|||
* @param num |
|||
* @return |
|||
*/ |
|||
private static double retain6(double num){ |
|||
String result = String .format("%.6f", num); |
|||
return Double.valueOf(result); |
|||
} |
|||
/** |
|||
* * 火星坐标系 (GCJ-02) to 84 * * @param lon * @param lat * @return |
|||
*/ |
|||
public static double[] gcj02_To_Gps84(double lat, double lon) { |
|||
double[] gps = transform(lat, lon); |
|||
double lontitude = lon * 2 - gps[1]; |
|||
double latitude = lat * 2 - gps[0]; |
|||
return new double[]{latitude, lontitude}; |
|||
} |
|||
|
|||
/** |
|||
* 火星坐标系 (GCJ-02) 与百度坐标系 (BD-09) 的转换算法 将 GCJ-02 坐标转换成 BD-09 坐标 |
|||
* |
|||
* @param lat |
|||
* @param lon |
|||
*/ |
|||
public static double[] gcj02_To_Bd09(double lat, double lon) { |
|||
double x = lon, y = lat; |
|||
double z = Math.sqrt(x * x + y * y) + 0.00002 * Math.sin(y * x_pi); |
|||
double theta = Math.atan2(y, x) + 0.000003 * Math.cos(x * x_pi); |
|||
double tempLon = z * Math.cos(theta) + 0.0065; |
|||
double tempLat = z * Math.sin(theta) + 0.006; |
|||
double[] gps = {tempLat, tempLon}; |
|||
return gps; |
|||
} |
|||
|
|||
/** |
|||
* * 火星坐标系 (GCJ-02) 与百度坐标系 (BD-09) 的转换算法 * * 将 BD-09 坐标转换成GCJ-02 坐标 * * @param |
|||
* bd_lat * @param bd_lon * @return |
|||
*/ |
|||
public static double[] bd09_To_Gcj02(double lat, double lon) { |
|||
double x = lon - 0.0065, y = lat - 0.006; |
|||
double z = Math.sqrt(x * x + y * y) - 0.00002 * Math.sin(y * x_pi); |
|||
double theta = Math.atan2(y, x) - 0.000003 * Math.cos(x * x_pi); |
|||
double tempLon = z * Math.cos(theta); |
|||
double tempLat = z * Math.sin(theta); |
|||
double[] gps = {tempLat, tempLon}; |
|||
return gps; |
|||
} |
|||
|
|||
/** |
|||
* 将gps84转为bd09 |
|||
* |
|||
* @param lat |
|||
* @param lon |
|||
* @return |
|||
*/ |
|||
public static double[] gps84_To_bd09(double lat, double lon) { |
|||
double[] gcj02 = gps84_To_Gcj02(lat, lon); |
|||
double[] bd09 = gcj02_To_Bd09(gcj02[0], gcj02[1]); |
|||
return bd09; |
|||
} |
|||
|
|||
public static double[] bd09_To_gps84(double lat, double lon) { |
|||
double[] gcj02 = bd09_To_Gcj02(lat, lon); |
|||
double[] gps84 = gcj02_To_Gps84(gcj02[0], gcj02[1]); |
|||
//保留小数点后六位
|
|||
gps84[0] = retain6(gps84[0]); |
|||
gps84[1] = retain6(gps84[1]); |
|||
return gps84; |
|||
} |
|||
|
|||
/** |
|||
* 保留小数点后六位 |
|||
* |
|||
* @param num |
|||
* @return |
|||
*/ |
|||
private static double retain6(double num) { |
|||
String result = String.format("%.6f", num); |
|||
return Double.valueOf(result); |
|||
} |
|||
} |
|||
|
|||
|
@ -1,246 +0,0 @@ |
|||
<?xml version="1.0" encoding="UTF-8"?> |
|||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" |
|||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> |
|||
<modelVersion>4.0.0</modelVersion> |
|||
|
|||
<parent> |
|||
<groupId>com.esua.epdc</groupId> |
|||
<artifactId>esua-epdc</artifactId> |
|||
<version>1.0.0</version> |
|||
</parent> |
|||
|
|||
<artifactId>epdc-gateway</artifactId> |
|||
<packaging>jar</packaging> |
|||
|
|||
<dependencies> |
|||
<dependency> |
|||
<groupId>com.esua.epdc</groupId> |
|||
<artifactId>epdc-commons-tools</artifactId> |
|||
<version>1.0.0</version> |
|||
</dependency> |
|||
<dependency> |
|||
<groupId>org.springframework.cloud</groupId> |
|||
<artifactId>spring-cloud-starter-gateway</artifactId> |
|||
</dependency> |
|||
<dependency> |
|||
<groupId>com.alibaba.cloud</groupId> |
|||
<artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId> |
|||
</dependency> |
|||
<dependency> |
|||
<groupId>org.springframework.cloud</groupId> |
|||
<artifactId>spring-cloud-starter-netflix-hystrix</artifactId> |
|||
</dependency> |
|||
<dependency> |
|||
<groupId>de.codecentric</groupId> |
|||
<artifactId>spring-boot-admin-starter-client</artifactId> |
|||
<version>${spring.boot.admin.version}</version> |
|||
</dependency> |
|||
<!-- zipkin client --> |
|||
<dependency> |
|||
<groupId>org.springframework.cloud</groupId> |
|||
<artifactId>spring-cloud-starter-zipkin</artifactId> |
|||
</dependency> |
|||
<dependency> |
|||
<groupId>com.esua.epdc</groupId> |
|||
<artifactId>epdc-common-clienttoken</artifactId> |
|||
<version>1.0.0</version> |
|||
<scope>compile</scope> |
|||
</dependency> |
|||
|
|||
<dependency> |
|||
<groupId>com.esua.epdc</groupId> |
|||
<artifactId>epdc-common-clienttoken</artifactId> |
|||
<version>1.0.0</version> |
|||
</dependency> |
|||
</dependencies> |
|||
|
|||
<build> |
|||
<finalName>${project.artifactId}</finalName> |
|||
<plugins> |
|||
<plugin> |
|||
<groupId>org.springframework.boot</groupId> |
|||
<artifactId>spring-boot-maven-plugin</artifactId> |
|||
</plugin> |
|||
<plugin> |
|||
<groupId>org.apache.maven.plugins</groupId> |
|||
<artifactId>maven-surefire-plugin</artifactId> |
|||
<configuration> |
|||
<skipTests>true</skipTests> |
|||
</configuration> |
|||
</plugin> |
|||
<plugin> |
|||
<groupId>org.apache.maven.plugins</groupId> |
|||
<artifactId>maven-deploy-plugin</artifactId> |
|||
<configuration> |
|||
<skip>true</skip> |
|||
</configuration> |
|||
</plugin> |
|||
<plugin> |
|||
<groupId>com.spotify</groupId> |
|||
<artifactId>dockerfile-maven-plugin</artifactId> |
|||
</plugin> |
|||
</plugins> |
|||
</build> |
|||
|
|||
<profiles> |
|||
<profile> |
|||
<id>dev</id> |
|||
<activation> |
|||
<activeByDefault>true</activeByDefault> |
|||
</activation> |
|||
<properties> |
|||
<server.port>9094</server.port> |
|||
<spring.profiles.active>dev</spring.profiles.active> |
|||
<docker.tag>dev</docker.tag> |
|||
|
|||
<!-- redis配置 --> |
|||
<spring.redis.index>2</spring.redis.index> |
|||
<spring.redis.host>47.104.224.45</spring.redis.host> |
|||
<spring.redis.port>6379</spring.redis.port> |
|||
<spring.redis.password>elink@888</spring.redis.password> |
|||
|
|||
<!-- gateway routes --> |
|||
<gateway.routes.epdc-auth-server.uri>lb://epdc-auth-server</gateway.routes.epdc-auth-server.uri> |
|||
<!-- <gateway.routes.epdc-admin-server.uri>lb://epdc-admin-server</gateway.routes.epdc-admin-server.uri>--> |
|||
<gateway.routes.epdc-admin-server.uri>http://127.0.0.1:9092</gateway.routes.epdc-admin-server.uri> |
|||
<gateway.routes.epdc-activiti-server.uri>lb://epdc-activiti-server |
|||
</gateway.routes.epdc-activiti-server.uri> |
|||
<gateway.routes.epdc-api-server.uri>lb://epdc-api-server</gateway.routes.epdc-api-server.uri> |
|||
<!-- <gateway.routes.epdc-api-server.uri>http://127.0.0.1:9040</gateway.routes.epdc-api-server.uri>--> |
|||
<gateway.routes.epdc-app-server.uri>lb://epdc-app-server</gateway.routes.epdc-app-server.uri> |
|||
<!-- <gateway.routes.epdc-app-server.uri>http://127.0.0.1:9058</gateway.routes.epdc-app-server.uri>--> |
|||
<gateway.routes.epdc-heart-server.uri>lb://epdc-heart-server</gateway.routes.epdc-heart-server.uri> |
|||
<!-- <gateway.routes.epdc-heart-server.uri>http://127.0.0.1:9060</gateway.routes.epdc-heart-server.uri>--> |
|||
<gateway.routes.epdc-job-server.uri>lb://epdc-job-server</gateway.routes.epdc-job-server.uri> |
|||
<!-- <gateway.routes.epdc-job-server.uri>http://127.0.0.1:9061</gateway.routes.epdc-job-server.uri>--> |
|||
<gateway.routes.epdc-message-server.uri>lb://epdc-message-server |
|||
</gateway.routes.epdc-message-server.uri> |
|||
<!--<gateway.routes.epdc-news-server.uri>lb://epdc-news-server</gateway.routes.epdc-news-server.uri>--> |
|||
<gateway.routes.epdc-news-server.uri>http://127.0.0.1:9064</gateway.routes.epdc-news-server.uri> |
|||
<gateway.routes.epdc-oss-server.uri>lb://epdc-oss-server</gateway.routes.epdc-oss-server.uri> |
|||
<gateway.routes.epdc-events-server.uri>lb://epdc-events-server</gateway.routes.epdc-events-server.uri> |
|||
<!-- <gateway.routes.epdc-events-server.uri>http://127.0.0.1:9066</gateway.routes.epdc-events-server.uri>--> |
|||
<gateway.routes.epdc-cloud-analysis-server.uri>http://127.0.0.1:9060</gateway.routes.epdc-cloud-analysis-server.uri> |
|||
<!-- <gateway.routes.epdc-cloud-analysis-server.uri>lb://epdc-cloud-analysis-server</gateway.routes.epdc-cloud-analysis-server.uri>--> |
|||
<gateway.routes.epdc-work-record-server.uri>http://127.0.0.1:9085</gateway.routes.epdc-work-record-server.uri> |
|||
<!-- <gateway.routes.epdc-work-record-server.uri>lb://epdc-work-record-server</gateway.routes.epdc-work-record-server.uri>--> |
|||
<gateway.routes.epdc-services-server.uri>lb://epdc-services-server |
|||
</gateway.routes.epdc-services-server.uri> |
|||
<!-- <gateway.routes.epdc-services-server.uri>http://127.0.0.1:9067</gateway.routes.epdc-services-server.uri>--> |
|||
<!--<gateway.routes.epdc-user-server.uri>lb://epdc-user-server</gateway.routes.epdc-user-server.uri>--> |
|||
<gateway.routes.epdc-user-server.uri>http://127.0.0.1:9068</gateway.routes.epdc-user-server.uri> |
|||
<gateway.routes.epdc-demo-server.uri>lb://epdc-demo-server</gateway.routes.epdc-demo-server.uri> |
|||
<gateway.routes.epdc-group-server.uri>http://127.0.0.1:9063</gateway.routes.epdc-group-server.uri> |
|||
<!--<gateway.routes.epdc-group-server.uri>lb://epdc-group-server</gateway.routes.epdc-group-server.uri>--> |
|||
<gateway.routes.epdc-websocket-server.uri>lb://epdc-websocket-server</gateway.routes.epdc-websocket-server.uri> |
|||
<gateway.routes.epdc-kpi-server.uri>lb://epdc-kpi-server</gateway.routes.epdc-kpi-server.uri> |
|||
<!-- <gateway.routes.epdc-custom-server.uri>http://127.0.0.1:9076</gateway.routes.epdc-kpi-server.uri>--> |
|||
<gateway.routes.epdc-custom-server.uri>lb://epdc-custom-server</gateway.routes.epdc-custom-server.uri> |
|||
<!-- <gateway.routes.epdc-analysis-server.uri>http://127.0.0.1:9077</gateway.routes.epdc-analysis-server.uri>--> |
|||
<gateway.routes.epdc-analysis-server.uri>lb://epdc-analysis-server</gateway.routes.epdc-analysis-server.uri> |
|||
<!-- nacos --> |
|||
<nacos.register-enabled>false</nacos.register-enabled> |
|||
<nacos.server-addr>47.104.224.45:8848</nacos.server-addr> |
|||
<nacos.ip></nacos.ip> |
|||
<nacos.namespace>6a3577b4-7b79-43f6-aebb-9c3f31263f6a</nacos.namespace> |
|||
|
|||
<spring.zipkin.base-url>http://localhost:9411</spring.zipkin.base-url> |
|||
</properties> |
|||
</profile> |
|||
<profile> |
|||
<id>test</id> |
|||
<properties> |
|||
<server.port>10000</server.port> |
|||
<spring.profiles.active>test</spring.profiles.active> |
|||
<docker.tag>test</docker.tag> |
|||
|
|||
<!-- redis配置 --> |
|||
<spring.redis.index>2</spring.redis.index> |
|||
<spring.redis.host>47.104.224.45</spring.redis.host> |
|||
<spring.redis.port>6379</spring.redis.port> |
|||
<spring.redis.password>elink@888</spring.redis.password> |
|||
|
|||
<!-- gateway routes --> |
|||
<gateway.routes.epdc-auth-server.uri>lb://epdc-auth-server</gateway.routes.epdc-auth-server.uri> |
|||
<gateway.routes.epdc-admin-server.uri>lb://epdc-admin-server</gateway.routes.epdc-admin-server.uri> |
|||
<gateway.routes.epdc-activiti-server.uri>lb://epdc-activiti-server |
|||
</gateway.routes.epdc-activiti-server.uri> |
|||
<gateway.routes.epdc-api-server.uri>lb://epdc-api-server</gateway.routes.epdc-api-server.uri> |
|||
<gateway.routes.epdc-app-server.uri>lb://epdc-app-server</gateway.routes.epdc-app-server.uri> |
|||
<gateway.routes.epdc-heart-server.uri>lb://epdc-heart-server</gateway.routes.epdc-heart-server.uri> |
|||
<gateway.routes.epdc-job-server.uri>lb://epdc-job-server</gateway.routes.epdc-job-server.uri> |
|||
<gateway.routes.epdc-message-server.uri>lb://epdc-message-server |
|||
</gateway.routes.epdc-message-server.uri> |
|||
<gateway.routes.epdc-news-server.uri>lb://epdc-news-server</gateway.routes.epdc-news-server.uri> |
|||
<gateway.routes.epdc-oss-server.uri>lb://epdc-oss-server</gateway.routes.epdc-oss-server.uri> |
|||
<gateway.routes.epdc-events-server.uri>lb://epdc-events-server</gateway.routes.epdc-events-server.uri> |
|||
<gateway.routes.epdc-cloud-analysis-server.uri>lb://epdc-cloud-analysis-server</gateway.routes.epdc-cloud-analysis-server.uri> |
|||
<gateway.routes.epdc-work-record-server.uri>lb://epdc-work-record-server</gateway.routes.epdc-work-record-server.uri> |
|||
<gateway.routes.epdc-services-server.uri>lb://epdc-services-server |
|||
</gateway.routes.epdc-services-server.uri> |
|||
<gateway.routes.epdc-user-server.uri>lb://epdc-user-server</gateway.routes.epdc-user-server.uri> |
|||
<gateway.routes.epdc-demo-server.uri>lb://epdc-demo-server</gateway.routes.epdc-demo-server.uri> |
|||
<gateway.routes.epdc-group-server.uri>lb://epdc-group-server</gateway.routes.epdc-group-server.uri> |
|||
<gateway.routes.epdc-websocket-server.uri>lb://epdc-websocket-server</gateway.routes.epdc-websocket-server.uri> |
|||
<gateway.routes.epdc-kpi-server.uri>lb://epdc-kpi-server</gateway.routes.epdc-kpi-server.uri> |
|||
<gateway.routes.epdc-custom-server.uri>lb://epdc-custom-server</gateway.routes.epdc-custom-server.uri> |
|||
<gateway.routes.epdc-analysis-server.uri>lb://epdc-analysis-server</gateway.routes.epdc-analysis-server.uri> |
|||
|
|||
<!-- nacos --> |
|||
<nacos.register-enabled>true</nacos.register-enabled> |
|||
<nacos.server-addr>47.104.224.45:8848</nacos.server-addr> |
|||
<nacos.ip>47.104.85.99</nacos.ip> |
|||
<nacos.namespace>6a3577b4-7b79-43f6-aebb-9c3f31263f6a</nacos.namespace> |
|||
|
|||
<spring.zipkin.base-url>http://localhost:9411</spring.zipkin.base-url> |
|||
</properties> |
|||
</profile> |
|||
|
|||
<profile> |
|||
<id>prod</id> |
|||
<properties> |
|||
<server.port>9094</server.port> |
|||
<spring.profiles.active>prod</spring.profiles.active> |
|||
<docker.tag>prod</docker.tag> |
|||
|
|||
<!-- gateway routes --> |
|||
<gateway.routes.epdc-auth-server.uri>lb://epdc-auth-server</gateway.routes.epdc-auth-server.uri> |
|||
<gateway.routes.epdc-admin-server.uri>lb://epdc-admin-server</gateway.routes.epdc-admin-server.uri> |
|||
<gateway.routes.epdc-activiti-server.uri>lb://epdc-activiti-server</gateway.routes.epdc-activiti-server.uri> |
|||
<gateway.routes.epdc-api-server.uri>lb://epdc-api-server</gateway.routes.epdc-api-server.uri> |
|||
<gateway.routes.epdc-app-server.uri>lb://epdc-app-server</gateway.routes.epdc-app-server.uri> |
|||
<gateway.routes.epdc-heart-server.uri>lb://epdc-heart-server</gateway.routes.epdc-heart-server.uri> |
|||
<gateway.routes.epdc-job-server.uri>lb://epdc-job-server</gateway.routes.epdc-job-server.uri> |
|||
<gateway.routes.epdc-message-server.uri>lb://epdc-message-server</gateway.routes.epdc-message-server.uri> |
|||
<gateway.routes.epdc-news-server.uri>lb://epdc-news-server</gateway.routes.epdc-news-server.uri> |
|||
<gateway.routes.epdc-oss-server.uri>lb://epdc-oss-server</gateway.routes.epdc-oss-server.uri> |
|||
<gateway.routes.epdc-cloud-analysis-server.uri>lb://epdc-cloud-analysis-server</gateway.routes.epdc-cloud-analysis-server.uri> |
|||
<gateway.routes.epdc-work-record-server.uri>lb://epdc-work-record-server</gateway.routes.epdc-work-record-server.uri> |
|||
<gateway.routes.epdc-events-server.uri>lb://epdc-events-server</gateway.routes.epdc-events-server.uri> |
|||
<gateway.routes.epdc-services-server.uri>lb://epdc-services-server</gateway.routes.epdc-services-server.uri> |
|||
<gateway.routes.epdc-user-server.uri>lb://epdc-user-server</gateway.routes.epdc-user-server.uri> |
|||
<gateway.routes.epdc-demo-server.uri>lb://epdc-demo-server</gateway.routes.epdc-demo-server.uri> |
|||
<gateway.routes.epdc-group-server.uri>lb://epdc-group-server</gateway.routes.epdc-group-server.uri> |
|||
<gateway.routes.epdc-websocket-server.uri>lb://epdc-websocket-server</gateway.routes.epdc-websocket-server.uri> |
|||
<gateway.routes.epdc-kpi-server.uri>lb://epdc-kpi-server</gateway.routes.epdc-kpi-server.uri> |
|||
<gateway.routes.epdc-custom-server.uri>lb://epdc-custom-server</gateway.routes.epdc-custom-server.uri> |
|||
<gateway.routes.epdc-analysis-server.uri>lb://epdc-analysis-server</gateway.routes.epdc-analysis-server.uri> |
|||
|
|||
<!-- redis配置 --> |
|||
<spring.redis.index>0</spring.redis.index> |
|||
<spring.redis.host>172.16.0.54</spring.redis.host> |
|||
<spring.redis.port>6379</spring.redis.port> |
|||
<spring.redis.password>Elink833066</spring.redis.password> |
|||
|
|||
<!-- nacos --> |
|||
<nacos.register-enabled>true</nacos.register-enabled> |
|||
<nacos.server-addr>172.16.0.52:8848</nacos.server-addr> |
|||
<nacos.ip></nacos.ip> |
|||
<nacos.namespace></nacos.namespace> |
|||
|
|||
<spring.zipkin.base-url>http://localhost:9411</spring.zipkin.base-url> |
|||
</properties> |
|||
</profile> |
|||
</profiles> |
|||
|
|||
</project> |
@ -1,69 +0,0 @@ |
|||
/** |
|||
* Copyright (c) 2018 人人开源 All rights reserved. |
|||
* <p> |
|||
* https://www.renren.io
|
|||
* <p> |
|||
* 版权所有,侵权必究! |
|||
*/ |
|||
|
|||
package com.elink.esua.epdc.config; |
|||
|
|||
import org.springframework.beans.factory.annotation.Autowired; |
|||
import org.springframework.boot.autoconfigure.web.ServerProperties; |
|||
import org.springframework.context.annotation.Bean; |
|||
import org.springframework.context.annotation.Configuration; |
|||
import org.springframework.http.HttpHeaders; |
|||
import org.springframework.http.HttpMethod; |
|||
import org.springframework.http.HttpStatus; |
|||
import org.springframework.http.server.reactive.ServerHttpRequest; |
|||
import org.springframework.http.server.reactive.ServerHttpResponse; |
|||
import org.springframework.web.cors.reactive.CorsUtils; |
|||
import org.springframework.web.server.ServerWebExchange; |
|||
import org.springframework.web.server.WebFilter; |
|||
import org.springframework.web.server.WebFilterChain; |
|||
import reactor.core.publisher.Mono; |
|||
|
|||
/** |
|||
* Cors跨域 |
|||
* |
|||
* @author Mark sunlightcs@gmail.com |
|||
* @since 1.0.0 |
|||
*/ |
|||
@Configuration |
|||
public class CorsConfig { |
|||
|
|||
private static final String MAX_AGE = "18000L"; |
|||
|
|||
@Autowired |
|||
private ServerProperties serverProperties; |
|||
|
|||
@Bean |
|||
public WebFilter corsFilter() { |
|||
return (ServerWebExchange ctx, WebFilterChain chain) -> { |
|||
ServerHttpRequest request = ctx.getRequest(); |
|||
if (!CorsUtils.isCorsRequest(request)) { |
|||
return chain.filter(ctx); |
|||
} |
|||
HttpHeaders requestHeaders = request.getHeaders(); |
|||
ServerHttpResponse response = ctx.getResponse(); |
|||
HttpMethod requestMethod = requestHeaders.getAccessControlRequestMethod(); |
|||
HttpHeaders headers = response.getHeaders(); |
|||
if (!request.getURI().getPath().startsWith(serverProperties.getServlet().getContextPath().concat("/ws"))) { |
|||
headers.add(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, requestHeaders.getOrigin()); |
|||
headers.add(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"); |
|||
} |
|||
headers.addAll(HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS, requestHeaders.getAccessControlRequestHeaders()); |
|||
if (requestMethod != null) { |
|||
headers.add(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, requestMethod.name()); |
|||
} |
|||
headers.add(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS, "*"); |
|||
headers.add(HttpHeaders.ACCESS_CONTROL_MAX_AGE, MAX_AGE); |
|||
if (request.getMethod() == HttpMethod.OPTIONS) { |
|||
response.setStatusCode(HttpStatus.OK); |
|||
return Mono.empty(); |
|||
} |
|||
return chain.filter(ctx); |
|||
}; |
|||
} |
|||
|
|||
} |
@ -1,54 +0,0 @@ |
|||
/** |
|||
* Copyright (c) 2018 人人开源 All rights reserved. |
|||
* <p> |
|||
* https://www.renren.io
|
|||
* <p> |
|||
* 版权所有,侵权必究! |
|||
*/ |
|||
|
|||
package com.elink.esua.epdc.feign; |
|||
|
|||
import com.elink.esua.epdc.common.token.dto.TokenDto; |
|||
import com.elink.esua.epdc.feign.fallback.ResourceFeignClientFallback; |
|||
import com.elink.esua.epdc.commons.tools.constant.ServiceConstant; |
|||
import com.elink.esua.epdc.commons.tools.security.user.UserDetail; |
|||
import com.elink.esua.epdc.commons.tools.utils.Result; |
|||
import org.springframework.cloud.openfeign.FeignClient; |
|||
import org.springframework.http.HttpHeaders; |
|||
import org.springframework.web.bind.annotation.GetMapping; |
|||
import org.springframework.web.bind.annotation.PostMapping; |
|||
import org.springframework.web.bind.annotation.RequestHeader; |
|||
import org.springframework.web.bind.annotation.RequestParam; |
|||
|
|||
/** |
|||
* 资源接口 |
|||
* |
|||
* @author Mark sunlightcs@gmail.com |
|||
* @since 1.0.0 |
|||
*/ |
|||
@FeignClient(name = ServiceConstant.EPDC_AUTH_SERVER, fallback = ResourceFeignClientFallback.class) |
|||
public interface ResourceFeignClient { |
|||
|
|||
/** |
|||
* 是否有资源访问权限 |
|||
* |
|||
* @param token token |
|||
* @param url 资源URL |
|||
* @param method 请求方式 |
|||
* @return 有访问权限,则返回用户信息 |
|||
*/ |
|||
@PostMapping("auth/resource") |
|||
Result<UserDetail> resource(@RequestHeader(HttpHeaders.ACCEPT_LANGUAGE) String language, @RequestParam("token") String token, |
|||
@RequestParam("url") String url, @RequestParam("method") String method); |
|||
|
|||
/** |
|||
* 获取登录用户信息 |
|||
* |
|||
* @param token |
|||
* @return com.elink.esua.epdc.commons.tools.utils.Result<com.elink.esua.epdc.commons.tools.security.user.CpUserDetail> |
|||
* @author |
|||
* @date 2019/8/19 17:19 |
|||
*/ |
|||
@GetMapping("auth/getLoginUserInfo") |
|||
Result<TokenDto> getLoginUserInfo(@RequestParam("token") String token); |
|||
} |
@ -1,35 +0,0 @@ |
|||
/** |
|||
* Copyright (c) 2018 人人开源 All rights reserved. |
|||
* <p> |
|||
* https://www.renren.io
|
|||
* <p> |
|||
* 版权所有,侵权必究! |
|||
*/ |
|||
|
|||
package com.elink.esua.epdc.feign.fallback; |
|||
|
|||
import com.elink.esua.epdc.common.token.dto.TokenDto; |
|||
import com.elink.esua.epdc.commons.tools.security.user.UserDetail; |
|||
import com.elink.esua.epdc.commons.tools.utils.Result; |
|||
import com.elink.esua.epdc.feign.ResourceFeignClient; |
|||
import org.springframework.stereotype.Component; |
|||
|
|||
/** |
|||
* 资源接口 Fallback |
|||
* |
|||
* @author Mark sunlightcs@gmail.com |
|||
* @since 1.0.0 |
|||
*/ |
|||
@Component |
|||
public class ResourceFeignClientFallback implements ResourceFeignClient { |
|||
|
|||
@Override |
|||
public Result<UserDetail> resource(String language, String token, String url, String method) { |
|||
return new Result<UserDetail>().error(); |
|||
} |
|||
|
|||
@Override |
|||
public Result<TokenDto> getLoginUserInfo(String token) { |
|||
return new Result<TokenDto>().error(); |
|||
} |
|||
} |
@ -1,154 +0,0 @@ |
|||
/** |
|||
* Copyright (c) 2018 人人开源 All rights reserved. |
|||
* |
|||
* https://www.renren.io
|
|||
* |
|||
* 版权所有,侵权必究! |
|||
*/ |
|||
|
|||
package com.elink.esua.epdc.filter; |
|||
|
|||
import com.alibaba.fastjson.JSON; |
|||
import com.elink.esua.epdc.feign.ResourceFeignClient; |
|||
import com.elink.esua.epdc.commons.tools.constant.Constant; |
|||
import com.elink.esua.epdc.commons.tools.security.user.UserDetail; |
|||
import com.elink.esua.epdc.commons.tools.utils.Result; |
|||
import org.apache.commons.lang3.StringUtils; |
|||
import org.springframework.beans.factory.annotation.Autowired; |
|||
import org.springframework.boot.context.properties.ConfigurationProperties; |
|||
import org.springframework.cloud.gateway.filter.GatewayFilterChain; |
|||
import org.springframework.cloud.gateway.filter.GlobalFilter; |
|||
import org.springframework.context.annotation.Configuration; |
|||
import org.springframework.core.io.buffer.DataBuffer; |
|||
import org.springframework.http.HttpHeaders; |
|||
import org.springframework.http.HttpStatus; |
|||
import org.springframework.http.MediaType; |
|||
import org.springframework.http.server.reactive.ServerHttpRequest; |
|||
import org.springframework.util.AntPathMatcher; |
|||
import org.springframework.web.server.ServerWebExchange; |
|||
import reactor.core.publisher.Flux; |
|||
import reactor.core.publisher.Mono; |
|||
|
|||
import java.nio.charset.StandardCharsets; |
|||
import java.util.List; |
|||
|
|||
/** |
|||
* 权限过滤器 |
|||
* |
|||
* @author Mark sunlightcs@gmail.com |
|||
* @since 1.0.0 |
|||
*/ |
|||
@Configuration |
|||
@ConfigurationProperties(prefix = "renren") |
|||
public class AuthFilter implements GlobalFilter { |
|||
|
|||
private final AntPathMatcher antPathMatcher = new AntPathMatcher(); |
|||
|
|||
@Autowired |
|||
private ResourceFeignClient resourceFeignClient; |
|||
/** |
|||
* 不拦截的urls |
|||
*/ |
|||
private List<String> urls; |
|||
|
|||
/** |
|||
* 不拦截工作端urls |
|||
*/ |
|||
private List<String> workLoginUrls; |
|||
|
|||
/** |
|||
* 拦截的工作端urls |
|||
*/ |
|||
private List<String> workUrls; |
|||
|
|||
@Override |
|||
public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) { |
|||
ServerHttpRequest request = exchange.getRequest(); |
|||
String requestUri = request.getPath().pathWithinApplication().value(); |
|||
|
|||
//请求放行,无需验证权限
|
|||
if(pathMatcher(requestUri)){ |
|||
return chain.filter(exchange); |
|||
} |
|||
|
|||
//获取用户token
|
|||
String token = request.getHeaders().getFirst(Constant.TOKEN_HEADER); |
|||
if(StringUtils.isBlank(token)){ |
|||
token = request.getHeaders().getFirst(Constant.AUTHORIZATION_HEADER); |
|||
if (StringUtils.isBlank(token)) { |
|||
token = request.getQueryParams().getFirst(Constant.TOKEN_HEADER); |
|||
} |
|||
} |
|||
|
|||
//资源访问权限
|
|||
String language = request.getHeaders().getFirst(HttpHeaders.ACCEPT_LANGUAGE); |
|||
Result<UserDetail> result = resourceFeignClient.resource(language, token, requestUri, request.getMethod().toString()); |
|||
//没权限访问,直接返回
|
|||
if(!result.success()){ |
|||
return response(exchange, result); |
|||
} |
|||
|
|||
//获取用户信息
|
|||
UserDetail userDetail = result.getData(); |
|||
if(userDetail != null){ |
|||
//当前登录用户userId,添加到header中
|
|||
ServerHttpRequest build = exchange.getRequest().mutate().header(Constant.USER_KEY, userDetail.getId()+"").build(); |
|||
return chain.filter(exchange.mutate().request(build).build()); |
|||
} |
|||
|
|||
return chain.filter(exchange); |
|||
} |
|||
|
|||
private Mono<Void> response(ServerWebExchange exchange, Object object) { |
|||
String json = JSON.toJSONString(object); |
|||
DataBuffer buffer = exchange.getResponse().bufferFactory().wrap(json.getBytes(StandardCharsets.UTF_8)); |
|||
exchange.getResponse().getHeaders().setContentType(MediaType.APPLICATION_JSON_UTF8); |
|||
exchange.getResponse().setStatusCode(HttpStatus.OK); |
|||
return exchange.getResponse().writeWith(Flux.just(buffer)); |
|||
} |
|||
|
|||
private boolean pathMatcher(String requestUri){ |
|||
for (String url : workLoginUrls) { |
|||
if(antPathMatcher.match(url, requestUri)){ |
|||
return true; |
|||
} |
|||
} |
|||
|
|||
for (String url : workUrls) { |
|||
if(antPathMatcher.match(url, requestUri)){ |
|||
return false; |
|||
} |
|||
} |
|||
|
|||
for (String url : urls) { |
|||
if(antPathMatcher.match(url, requestUri)){ |
|||
return true; |
|||
} |
|||
} |
|||
return false; |
|||
} |
|||
|
|||
public List<String> getUrls() { |
|||
return urls; |
|||
} |
|||
|
|||
public void setUrls(List<String> urls) { |
|||
this.urls = urls; |
|||
} |
|||
|
|||
public List<String> getWorkLoginUrls() { |
|||
return workLoginUrls; |
|||
} |
|||
|
|||
public void setWorkLoginUrls(List<String> workLoginUrls) { |
|||
this.workLoginUrls = workLoginUrls; |
|||
} |
|||
|
|||
public List<String> getWorkUrls() { |
|||
return workUrls; |
|||
} |
|||
|
|||
public void setWorkUrls(List<String> workUrls) { |
|||
this.workUrls = workUrls; |
|||
} |
|||
} |
Some files were not shown because too many files changed in this diff
Loading…
Reference in new issue