Commit a34d7847 by huangjinxin

Merge remote-tracking branch 'origin/develop' into develop

2 parents 66627511 e9cb7768
@@ -23,7 +23,7 @@ import java.util.Objects;
* @date 2023/10/24
*/
@RestController
@RequestMapping("/pea-order/msg")
@RequestMapping("/pea-order")
public class MsgController {
@Autowired
@@ -38,7 +38,7 @@ public class MsgController {
* @author RenPing
* @date 2023/10/24
*/
@GetMapping("/list")
@GetMapping("/msg/list")
public Result<PageResult<MsgGroupResp>> list(@RequestHeader(name = "userId", required = true) String userId,
MsgDTO.Request request) {
CommonUtil.setNullValue(request);
@@ -65,7 +65,7 @@ public class MsgController {
* @author RenPing
* @date 2023/10/24
*/
@RequestMapping(value = "/delete", method = RequestMethod.POST)
@RequestMapping(value = "/msg/delete", method = RequestMethod.POST)
public Result<Boolean> delete(@RequestHeader(name = "userId", required = true) String userId, @RequestBody @Valid MsgDTO.DeleteDto deleteDto) {
AssertUtil.isNotEmpty(userId, "User ID must not be empty");
msgService.delete(deleteDto.getId(), userId);
......
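The mapping change above only moves the "/msg" path segment from the class-level @RequestMapping down to the individual handlers, so the exposed URLs stay the same (GET /pea-order/msg/list, POST /pea-order/msg/delete). A minimal sketch of how the class-level and method-level mappings compose, with a hypothetical class name and placeholder handler bodies (the real Result/PageResult types and service calls are omitted):

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestHeader;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical, simplified stand-in for MsgController after the change.
@RestController
@RequestMapping("/pea-order")
public class MsgRoutingSketch {

    // GET /pea-order/msg/list  (class prefix + method path)
    @GetMapping("/msg/list")
    public String list(@RequestHeader(name = "userId", required = true) String userId) {
        return "list for " + userId;   // placeholder body
    }

    // POST /pea-order/msg/delete
    @RequestMapping(value = "/msg/delete", method = RequestMethod.POST)
    public Boolean delete(@RequestHeader(name = "userId", required = true) String userId) {
        return Boolean.TRUE;           // placeholder body
    }
}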
@@ -50,44 +50,6 @@ public class PeaApiController {
};
/**
* 2.1 MQ organization structure sync event notification
*
* @param requestParam request parameters
* @return success or failure
* @apiNote When the organization structure changes, the BEAN system notifies the PEA system; the PEA system then, based on the changed elements, queries the BEAN system to fetch the detailed information.
*/
public Result<?> organizationTree(@NotNull(message = "Request parameters must not be empty") @Validated({ValidGroup.PeaTree.class})
@RequestBody OrganizationTreeDTO.Request requestParam) {
return Result.success(null);
}
/**
* 2.2 MQ organization staff sync event notification
*
* @param requestParam request parameters
* @return success or failure
* @apiNote When an organization's staff changes, BEAN calls this interface to inform the PEA system of the change; the PEA system then queries the BEAN system for all staff of that organization and saves the information locally.
*/
public Result<?> organizationStaff(@RequestBody OrganizationTreeDTO.Request requestParam) {
return Result.success(null);
}
/**
* 2.3 MQ technician information sync event notification
*
* @param requestParam request parameters
* @return success or failure
* @apiNote Pushes the engineer information of a branch/group to the PEA system; supports two modes: full and incremental.
*/
public Result<?> organizationEngineer(@RequestBody EngineerParamDTO.Request requestParam) {
return Result.success(null);
}
/**
* 3.1 GET query technician calendar
*
* @param engineerCode engineer code
......
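The removed 2.x endpoints all describe the same notify-then-pull protocol in their @apiNote: BEAN only signals that something changed, and PEA is expected to query BEAN afterwards for the full details. A minimal, entirely hypothetical sketch of that flow (none of these names exist in the commit, and the real implementation is not shown in the diff):

// Hypothetical illustration of the notify-then-pull flow described in the removed Javadoc.
public class SyncNotificationSketch {

    interface BeanClient {                      // assumed client for querying the BEAN system
        String fetchOrganizationDetail(String organizationId);
    }

    private final BeanClient beanClient;

    public SyncNotificationSketch(BeanClient beanClient) {
        this.beanClient = beanClient;
    }

    // Step 1: BEAN notifies that the given organization changed (no payload details).
    // Step 2: PEA pulls the current detail from BEAN and persists it locally.
    public void onOrganizationChanged(String organizationId) {
        String detail = beanClient.fetchOrganizationDetail(organizationId);
        System.out.println("would persist locally: " + detail);   // placeholder for the local save
    }
}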
@@ -50,5 +50,6 @@ public interface MsgDao extends JpaRepository<MsgEntity, Long> {
") t"
, nativeQuery = true // enable native SQL
)
-List<Map<String, Objects>> getList(@Param("req") MsgDTO.Request req, @Param("msgGroupResp") MsgGroupResp msgGroupResp, Pageable pageable);
+List<Map<String, Objects>> getList(@Param("req") MsgDTO.Request req
+, @Param("msgGroupResp") MsgGroupResp msgGroupResp, Pageable pageable);
}
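MsgDao.getList follows the Spring Data JPA pattern of a native @Query whose rows come back as maps, with the Pageable argument supplying paging and sorting. A minimal self-contained sketch of that pattern, where the table, columns and method name are illustrative and not taken from the commit:

import java.util.List;
import java.util.Map;

import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;

// Hypothetical repository; the msg table, columns and method name are made up for illustration.
public interface MsgListSketchDao extends JpaRepository<MsgEntity, Long> {

    @Query(value = "SELECT m.id AS id, m.title AS title, m.type AS type " +
            "FROM msg m WHERE m.user_id = :userId",
            nativeQuery = true // enable native SQL, as in MsgDao
    )
    List<Map<String, Object>> listByUser(@Param("userId") String userId, Pageable pageable);
}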
@@ -59,9 +59,9 @@ public class MsgServiceImpl implements MsgService {
msgGroupResp.setGroupName(clusterDao.getByClusterId(msgGroupResp.getBranchId()).getName());
}
Sort sort2 = Sort.by("asc".equalsIgnoreCase(request.getSortType()) ? Sort.Direction.ASC : Sort.Direction.DESC, request.getSort());
Pageable pageable2 = PageRequest.of(0, Integer.MAX_VALUE, sort2);
List<Map<String, Objects>> msgMapList = msgDao.getList(request, msgGroupResp, pageable2);
List<Map<String, Objects>> msgMapList = msgDao.getList(request,
msgGroupResp,
PageRequest.of(0, Integer.MAX_VALUE, sort));
List<MsgResp> msgList = msgMapList.stream().map(msgMap -> {
MsgResp msgResp = JSONObject.parseObject(JSONObject.toJSONString(msgMap), MsgResp.class);
msgResp.setTypeName(MsgTypeEnum.getNameByValue(msgResp.getType()));
......
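The service change keeps the existing two-step pattern: build a Sort/PageRequest from the request's sort fields, then convert each Map row returned by the DAO into a response object by round-tripping through fastjson. A minimal sketch of that pattern with hard-coded inputs (MsgRow stands in for MsgResp, and the string literals replace request.getSortType()/request.getSort()):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import com.alibaba.fastjson.JSONObject;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;

public class RowMappingSketch {

    public static class MsgRow {          // hypothetical DTO standing in for MsgResp
        public Long id;
        public String title;
    }

    public static void main(String[] args) {
        // "desc" and "id" stand in for request.getSortType() and request.getSort()
        Sort sort = Sort.by("asc".equalsIgnoreCase("desc") ? Sort.Direction.ASC : Sort.Direction.DESC, "id");
        Pageable pageable = PageRequest.of(0, Integer.MAX_VALUE, sort);

        // Hard-coded row in place of msgDao.getList(...)
        List<Map<String, Object>> rows = List.of(Map.of("id", 1L, "title", "hello"));
        List<MsgRow> dtos = rows.stream()
                .map(row -> JSONObject.parseObject(JSONObject.toJSONString(row), MsgRow.class))
                .collect(Collectors.toList());

        System.out.println(pageable + " -> " + dtos.size() + " row(s)");
    }
}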
<?xml version="1.0" encoding="UTF-8"?>
<!-- Saves log messages of different levels to different files -->
<configuration>
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<springProperty name="LOG_MAXFILESIZE" scope="context" source="logback.filesize" defaultValue="100MB"/>
<springProperty name="LOG_FILEMAXDAY" scope="context" source="logback.filemaxday" defaultValue="30"/>
<!-- Log output location within the project -->
<property name="LOG_FILE" value="logs/${spring.application.name:-}"/>
<!-- Console log output pattern -->
<property name="CONSOLE_LOG_PATTERN"
value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
<!-- Log file output pattern -->
<property name="FILE_LOG_PATTERN"
value="%d{yyyy-MM-dd HH:mm:ss} %-5level ${spring.application.name:-} %thread %logger %msg%n"/>
<!-- Console output -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<!-- Log output encoding -->
<encoder>
<pattern>${CONSOLE_LOG_PATTERN}</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- Regular log files, rolled daily -->
<appender name="FileAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_FILE}/info/${spring.application.name:-}.log</file>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- Time-based rolling policy -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_FILE}/info/${spring.application.name:-}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<!-- Retention period, in days -->
<maxHistory>${LOG_FILEMAXDAY}</maxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${LOG_MAXFILESIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>INFO</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Error file output: saves exception stacks to a separate file as well, for easier lookup -->
<appender name="FILE_ERROR"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<File>${LOG_FILE}/error/${spring.application.name:-}.error.log</File>
<rollingPolicy
class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<FileNamePattern>${LOG_FILE}/error/${spring.application.name:-}.error-%d{yyyy-MM-dd}-%i.zip
</FileNamePattern>
<maxFileSize>${LOG_MAXFILESIZE}</maxFileSize>
<maxHistory>${LOG_FILEMAXDAY}</maxHistory>
<totalSizeCap>500MB</totalSizeCap>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter"><!-- 只打印错误日志 -->
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<filter class="ch.qos.logback.classic.filter.LevelFilter"><!-- 只打印警告日志 -->
<level>WARN</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender name="FILE_DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_FILE}/debug/${spring.application.name:-}.log</file>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- Time-based rolling policy -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_FILE}/debug/${spring.application.name:-}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<!-- Retention period, in days -->
<maxHistory>${LOG_FILEMAXDAY}</maxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${LOG_MAXFILESIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>DEBUG</level>
</filter>
</appender>
<!-- Asynchronous output -->
<appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
<!-- 0 = do not discard any logs. By default, when the queue is 80% full, TRACE, DEBUG and INFO events are discarded -->
<discardingThreshold>0</discardingThreshold>
<!-- Queue depth; this value affects performance. The default is 256 -->
<queueSize>256</queueSize>
<!-- Attach the target appender; at most one can be attached -->
<appender-ref ref="FileAppender"/>
</appender>
<!-- Root log level -->
<root level="debug">
<appender-ref ref="console"/>
<appender-ref ref="FileAppender"/>
<appender-ref ref="FILE_ERROR"/>
<appender-ref ref="FILE_DEBUG"/>
</root>
</configuration>
\ No newline at end of file
<configuration>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSZ} %level [%thread] %logger{15} : %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE" />
</root>
</configuration>
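Both configurations are plain SLF4J/Logback setups, so application code only talks to the SLF4J API and the appenders above decide where each level ends up. A minimal usage sketch (the class name is illustrative; the routing comments assume the first configuration with its debug root level):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingSketch {   // hypothetical class name

    private static final Logger log = LoggerFactory.getLogger(LoggingSketch.class);

    public static void main(String[] args) {
        log.debug("debug detail");            // first config: debug file only; second config: dropped (root is INFO)
        log.info("normal progress message");  // first config: console + the rolling info file
        log.error("something failed", new IllegalStateException("example"));  // first config: console + the separate error file
    }
}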