1. fakecc: drop the mask fields from the data it sends
2. Make the c3pzff URL and the SSO URL dynamically configurable
3. Change application-test to read its configuration from environment variables
PushM
2024-04-29 22:53:35 +08:00
parent a1509dc269
commit 0b6174da96
5 changed files with 114 additions and 16 deletions

View File

@@ -11,13 +11,19 @@ COPY realtime_protection realtime_protection
 # DORIS_URL="10.58.72.135:9030"\
 # KAFKA_SERVERS="10.58.72.125:9092,10.58.72.126:9092,10.58.72.127:9092,10.58.72.128:9092,10.58.72.129:9092,\
 # 10.58.72.130:9092,10.58.72.131:9092,10.58.72.132:9092,10.58.72.133:9092,10.58.72.134:9092"
+# C3_PZFF_HOST="10.58.72.151:8088"\
+# SSO_HOST='https://passport.iam.pub'
 ENV MYSQL_USERNAME="root" \
     MYSQL_PASSWD="aiihhbfcsy123!@#" \
     MYSQL_URL="192.168.107.89:3306" \
     DORIS_USERNAME="root" \
     DORIS_PASSWD="" \
     DORIS_URL="10.26.22.133:9030"\
-    KAFKA_SERVERS="172.29.128.1:9092"
+    KAFKA_SERVERS="172.29.128.1:9092"\
+    C3_PZFF_HOST="192.168.107.49:8088"\
+    SSO_HOST='114.243.134.122:9217'
 EXPOSE 8081
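
These ENV values are image-level defaults (overridable with docker run -e); they only reach the application because application-test.yml, the last file in this commit, dereferences them through ${...} placeholders. A hypothetical startup probe like the sketch below, which is not part of the commit, can confirm the wiring end to end:

import org.springframework.boot.CommandLineRunner;
import org.springframework.core.env.Environment;
import org.springframework.stereotype.Component;

// Hypothetical probe: prints the properties that the Dockerfile's ENV block
// feeds into application-test.yml via ${C3_PZFF_HOST} and ${SSO_HOST}.
@Component
public class ConfigProbe implements CommandLineRunner {

    private final Environment env;

    public ConfigProbe(Environment env) {
        this.env = env;
    }

    @Override
    public void run(String... args) {
        System.out.println("c3.pzff.host = " + env.getProperty("c3.pzff.host"));
        System.out.println("sso.host = " + env.getProperty("sso.host"));
    }
}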

View File

@@ -47,11 +47,11 @@ def format_data_to_json(test_data):
             "protocol": protocol_v4,
             "event_id": data_parts[24],
             # test_data_2.split(",")[-7],
-            "mask_src_ip": data_parts[5],
-            "mask_dst_ip": data_parts[6],
-            "mask_src_port": int(data_parts[7]),
-            "mask_dst_ip": int(data_parts[8]),
-            "mask_protocol": int(data_parts[9])
+            # "mask_src_ip": data_parts[5],
+            # "mask_dst_ip": data_parts[6],
+            # "mask_src_port": int(data_parts[7]),
+            # "mask_dst_ip": int(data_parts[8]),
+            # "mask_protocol": int(data_parts[9])
         },
         "content": test_data
     }
@@ -65,6 +65,8 @@ def delivery_report(err, msg):
 conf = {
     'bootstrap.servers': "192.168.107.49:9092",
+    # bootstrap.servers: '10.58.72.125:9092,10.58.72.126:9092,10.58.72.127:9092,10.58.72.128:9092,10.58.72.129:9092,\
+    # 10.58.72.130:9092,10.58.72.131:9092,10.58.72.132:9092,10.58.72.133:9092,10.58.72.134:9092',
     'client.id': 'python-server'
 }

View File

@@ -8,6 +8,7 @@ import com.realtime.protection.configuration.utils.enums.TaskTypeEnum;
 import com.realtime.protection.configuration.utils.enums.audit.AuditStatusEnum;
 import com.realtime.protection.server.command.CommandService;
 import com.realtime.protection.server.task.TaskService;
+import org.springframework.beans.factory.annotation.Value;
 import org.springframework.http.HttpStatus;
 import org.springframework.web.reactive.function.client.WebClient;
 import org.springframework.web.reactive.function.client.WebClientResponseException;
@@ -19,8 +20,11 @@ import java.util.concurrent.atomic.AtomicReference;
 public class StateHandler {
+    @Value("${c3.pzff.host}")
+    private String c3_pzff;
     private final WebClient client = WebClient.builder()
-            .baseUrl("http://192.168.107.49:8088")
+            // .baseUrl("http://192.168.107.49:8088")
+            .baseUrl("http://"+c3_pzff)
             .build();
     protected Boolean handleStart(TaskService taskService, CommandService commandService, Long taskId) {
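
One caveat with this change: Java field initializers run before Spring injects @Value fields, so c3_pzff is still null at the moment the inline WebClient.builder() chain executes, and the base URL resolves to "http://null". A sketch of the usual fix, assuming StateHandler is (or can be made) a Spring-managed bean, which @Value injection already requires, is to pass the property through the constructor:

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.web.reactive.function.client.WebClient;

@Component
public class StateHandler {

    private final WebClient client;

    // The placeholder is resolved before the constructor body runs,
    // so the WebClient is built with the real host rather than "null".
    public StateHandler(@Value("${c3.pzff.host}") String c3PzffHost) {
        this.client = WebClient.builder()
                .baseUrl("http://" + c3PzffHost)
                .build();
    }
}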

View File

@@ -4,6 +4,7 @@ import org.apache.logging.log4j.util.Strings;
 import javax.security.auth.login.LoginException;
+import org.springframework.beans.factory.annotation.Value;
 import org.springframework.stereotype.Service;
 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -27,6 +28,9 @@ public class LoginService {
     private final LoginMapper loginMapper;
+    @Value("${sso.host}")
+    private String HostSSO;
     public LoginService(LoginMapper loginMapper) {
         this.loginMapper = loginMapper;
     }
@@ -50,7 +54,7 @@ public class LoginService {
         ObjectMapper objectMapper = new ObjectMapper();
         OkHttpClient client = new OkHttpClient();
         Request request = new Request.Builder()
-                .url("http://114.243.134.122:9217/passport/accessToken?grant_type=client_credentials")
+                .url("http://" + HostSSO + "/passport/accessToken?grant_type=client_credentials")
                 .header("Authorization", "Basic TlNBRERAWlguT1JHOk14a1hHZ1ltOUNROUE3TCRSOCNLRW02R1pSeEhwd1c2")
                 .post(okhttp3.internal.Util.EMPTY_REQUEST)
                 .build();
@@ -74,7 +78,7 @@ public class LoginService {
         RequestBody body = new MultipartBody.Builder().setType(MultipartBody.FORM)
                 .addFormDataPart("sessionData", sessionData).build();
         request = new Request.Builder()
-                .url("http://114.243.134.122:9217/passport/accessApplication")
+                .url("http://" + HostSSO + "/passport/accessApplication")
                 .header("Authorization", "Bearer " + accessToken)
                 .header("Content-Type", "application/x-www-form-urlencoded")
                 .post(body)
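
One observation, not part of the commit: ${sso.host} is wired to ${SSO_HOST} with no default, so startup fails with an unresolved-placeholder error whenever SSO_HOST is unset. If the previously hard-coded address is still an acceptable fallback for local runs, the default belongs on the inner placeholder in application-test.yml, i.e. sso.host: ${SSO_HOST:114.243.134.122:9217}; a default on the @Value side ("${sso.host:...}") would never apply, because sso.host itself is always defined there.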

View File

@@ -15,15 +15,16 @@ spring:
   datasource:
     mysql:
       driver-class-name: com.mysql.cj.jdbc.Driver
-      username: root
-      password: aiihhbfcsy123!@#
-      url: jdbc:mysql://192.168.107.89:3306/realtime_protection?serverTimezone=Asia/Shanghai
+      username: ${MYSQL_USERNAME:root}
+      password: ${MYSQL_PASSWD}
+      url: jdbc:mysql://${MYSQL_URL:localhost:3306}/realtime_protection?serverTimezone=Asia/Shanghai
       hikari:
         is-auto-commit: false
     doris:
       driver-class-name: com.mysql.cj.jdbc.Driver
-      username: root
-      url: jdbc:mysql://10.26.22.133:9030/command
+      username: ${DORIS_USERNAME:root}
+      password: ${DORIS_PASSWD}
+      url: jdbc:mysql://${DORIS_URL:localhost:9030}/command
       hikari:
         is-auto-commit: false
   aop:
@@ -33,6 +34,80 @@ spring:
     grace-destroy: true
   jackson:
     default-property-inclusion: non_null
+  kafka:
+    # Kafka cluster addresses; separate multiple brokers with commas
+    bootstrap-servers: ${KAFKA_SERVERS}
+    consumer:
+      topic-name: topic-alert
+      # Consumer group
+      group-id: AlertGroup
+      # Whether to auto-commit offsets (default true). To avoid duplicated or lost data, set it to false and commit offsets manually
+      enable-auto-commit: false
+      # What the consumer does when reading a partition with no committed offset, or when the offset is invalid:
+      # earliest: if a committed offset exists, resume from it; otherwise consume the partition from the beginning
+      # latest: if a committed offset exists, resume from it; otherwise consume only records produced after the consumer started
+      # none: if every partition has a committed offset, resume from them; if any partition lacks one, throw an exception
+      auto-offset-reset: latest
+      # Key deserializer
+      key-deserializer: org.springframework.kafka.support.serializer.ErrorHandlingDeserializer
+      # Value deserializer; JSON is recommended, since it can carry entity classes without extra configuration
+      value-deserializer: org.springframework.kafka.support.serializer.ErrorHandlingDeserializer
+      # Trusted packages for the consumer's JSON deserialization; required for deserializing entity classes
+      properties:
+        spring.json.trusted.packages: com.realtime.protection.configuration.entity.*
+        # Maximum number of records one poll() may return (default 500). If fewer new records are available, poll() returns what there is; otherwise at most 500 per call.
+        # That default is too large for some workloads: it can be hard to guarantee that 500 records are processed within 5 minutes,
+        # and if the consumer cannot finish them within 5 minutes a rebalance is triggered;
+        # the batch is then handed to another consumer, which cannot finish it either, so the batch never completes.
+        # To avoid this, estimate the worst-case time to process one record and override the default max.poll.records.
+        # Note: this only takes effect with BatchListener (batch listening) enabled; without BatchListener the rebalance issue does not occur.
+        #max-poll-records: 3
+        spring.deserializer.key.delegate.class: org.springframework.kafka.support.serializer.JsonDeserializer
+        spring.deserializer.value.delegate.class: org.springframework.kafka.support.serializer.JsonDeserializer
+#      properties:
+#        # Maximum interval between two polls (default 5 minutes); exceeding it triggers a rebalance
+#        max:
+#          poll:
+#            interval:
+#              ms: 600000
+#        # How long the broker may go without a consumer heartbeat before triggering a rebalance (default 10s)
+#        session:
+#          timeout:
+#            ms: 10000
+    producer:
+      # Retry count; with a value greater than 0 the client re-sends records that failed to send
+      retries: 3
+      # Batch size: 16 KB
+      batch-size: 16384
+      # Buffer memory: 32 MB
+      buffer-memory: 33554432
+      acks: 1
+      # Serializers for the message key and value
+      key-serializer: org.springframework.kafka.support.serializer.JsonSerializer
+      # Value serialization uses JSON
+      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
+    # Listener
+    listener:
+      # record: commit after each record is processed by the listener (ListenerConsumer)
+      # batch: commit after each batch returned by poll() is processed
+      # time: commit after a processed batch once more than TIME has elapsed since the last commit
+      # count: commit after a processed batch once at least COUNT records have been processed
+      # count_time: commit when either the TIME or the COUNT condition is met
+      # manual: commit after the batch is processed and Acknowledgment.acknowledge() is called manually
+      # manual_immediate: commit immediately once Acknowledgment.acknowledge() is called; generally the recommended mode
+      ack-mode: manual_immediate
+#      # Number of threads in the listener container, usually set to machines * partitions
+#      concurrency: 4
+#      # Auto-commit is off, so manual acknowledgment must be configured
+#      ack-mode: manual_immediate
+#      # By default an error is raised when a listened topic does not exist; set it to false to ignore that
+#      missing-topics-fatal: false
+#      # Maximum interval between two polls (default 5 minutes); exceeding it triggers a rebalance
+#      poll-timeout: 600000
 mybatis:
   mapper-locations: classpath:mappers/*
@@ -47,12 +122,19 @@ task:
 springdoc:
   api-docs:
     enabled: true
-    path: /api-docs
+    path: /v3/api-docs
   swagger-ui:
-    path: /swagger
+    path: /swagger-ui.html
   packages-to-scan: com.realtime.protection.server
 management:
   endpoint:
     shutdown:
       enabled: true
+c3:
+  pzff:
+    host: ${C3_PZFF_HOST}
+sso:
+  host: ${SSO_HOST}
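
With ack-mode manual_immediate and the ErrorHandlingDeserializer delegating to JsonDeserializer, a matching consumer acknowledges each record explicitly, so the offset is committed only after the record has actually been handled. A minimal sketch (the listener class and its handling logic are hypothetical; the topic and group come from the properties above):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class AlertListener {

    // Concrete payload classes must fall under spring.json.trusted.packages above.
    @KafkaListener(topics = "${spring.kafka.consumer.topic-name}", groupId = "AlertGroup")
    public void onAlert(ConsumerRecord<String, Object> record, Acknowledgment ack) {
        // ... process the alert payload in record.value() ...
        ack.acknowledge(); // manual_immediate: the offset is committed right here
    }
}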