A Deep Dive into the Logback.xml File and Its Usage
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
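<!-- scan="true" makes Logback re-read this file when it changes, checking at most once per scanPeriod -->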
<!-- Pull in Spring Boot's Logback defaults; the CONSOLE_LOG_PATTERN defined there is overridden below -->
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
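<!-- Custom conversion words: %ip renders the host IP via IPAddressConverter, and %m is rebound to SensitiveMessageConverter so messages can be masked before output -->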
<conversionRule conversionWord="ip" converterClass="com.longfor.gaia.gfs.core.utils.IPAddressConverter" />
<conversionRule conversionWord="m" converterClass="com.property.commons.log.sensitive.SensitiveMessageConverter"/>
<springProperty scope="context" name="spring_application_name" source="spring.application.name" />
<springProperty scope="context" name="server_port" source="server.port" />
<springProperty scope="context" name="LOG_HOME" source="logging.path" defalutValue="../logs"/>
<springProperty scope="context" name="env" source="env" defalutValue="sit"/>
<springProperty scope="context" name="console_switch" source="log.console.switch" defalutValue="false"/>
<property name="CONSOLE_LOG_PATTERN" value="%clr(${spring_application_name}){cyan}||%clr(%d{ISO8601}){faint}|%clr(%p)|%X{requestId}|%X{X-B3-TraceId:-}|%X{requestIp}|%X{userIp}|%ip|${server_port}|${PID}|%clr(%t){faint}|%clr(%.40logger{39}){cyan}.%clr(%method){cyan}:%L|%m|%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
<property name="FILE_LOG_PATTERN" value="${spring_application_name}||%d{ISO8601}|%p|%X{requestId}|%X{X-B3-TraceId:-}|%X{requestIp}|%X{userIp}|%ip|${server_port}|${PID}|%t|%.40logger{39}.%method:%L|%m|%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
<include resource="org/springframework/boot/logging/logback/console-appender.xml"/>
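<!-- console-appender.xml defines the CONSOLE appender (referenced in <root> below), which renders CONSOLE_LOG_PATTERN -->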
<springProperty scope="context" name="spring_application_name" source="spring.application.name" />
<springProperty scope="context" name="kafka_enabled" source="longfor.web.logging.kafka.enabled"/>
<springProperty scope="context" name="kafka_broker" source="longfor.web.logging.kafka.broker"/>
<springProperty scope="context" name="kafka_env" source="longfor.web.logging.kafka.env"/>
<!-- Maximum size of a single log file; once exceeded, the file is rolled over and compressed into the archive -->
<property name="DEBUG_MAX_FILE_SIZE" value="100MB"/>
<property name="INFO_MAX_FILE_SIZE" value="100MB"/>
<property name="ERROR_MAX_FILE_SIZE" value="100MB"/>
<property name="TRACE_MAX_FILE_SIZE" value="100MB"/>
<property name="WARN_MAX_FILE_SIZE" value="100MB"/>
<!-- Maximum retention for archived log files (in rollover periods, i.e. days here) -->
<property name="DEBUG_MAX_HISTORY" value="9"/>
<property name="INFO_MAX_HISTORY" value="9"/>
<property name="ERROR_MAX_HISTORY" value="9"/>
<property name="TRACE_MAX_HISTORY" value="9"/>
<property name="WARN_MAX_HISTORY" value="9"/>
<!-- Total size cap for all archived logs; once their combined size exceeds it, the oldest archives are deleted -->
<property name="DEBUG_TOTAL_SIZE_CAP" value="5GB"/>
<property name="INFO_TOTAL_SIZE_CAP" value="5GB"/>
<property name="ERROR_TOTAL_SIZE_CAP" value="5GB"/>
<property name="TRACE_TOTAL_SIZE_CAP" value="5GB"/>
<property name="WARN_TOTAL_SIZE_CAP" value="5GB"/>
<!-- Roll over to a new log file each day -->
<appender name="DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Active log file -->
<file>${LOG_HOME}/debug.log</file>
<!-- Rollover and compression settings -->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/backup/debug/debug.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxHistory>${DEBUG_MAX_HISTORY}</maxHistory>
<maxFileSize>${DEBUG_MAX_FILE_SIZE}</maxFileSize>
<totalSizeCap>${DEBUG_TOTAL_SIZE_CAP}</totalSizeCap>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>DEBUG</level>
</filter>
</appender>
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Active log file -->
<file>${LOG_HOME}/info.log</file>
<!-- Rollover and compression settings -->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/backup/info/info.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxHistory>${INFO_MAX_HISTORY}</maxHistory>
<maxFileSize>${INFO_MAX_FILE_SIZE}</maxFileSize>
<totalSizeCap>${INFO_TOTAL_SIZE_CAP}</totalSizeCap>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Active log file -->
<file>${LOG_HOME}/warn.log</file>
<!-- Rollover and compression settings -->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/backup/warn/warn.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxHistory>${WARN_MAX_HISTORY}</maxHistory>
<maxFileSize>${WARN_MAX_FILE_SIZE}</maxFileSize>
<totalSizeCap>${WARN_TOTAL_SIZE_CAP}</totalSizeCap>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>WARN</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Active log file -->
<file>${LOG_HOME}/error.log</file>
<!-- Rollover and compression settings -->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/backup/error/error.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxHistory>${ERROR_MAX_HISTORY}</maxHistory>
<maxFileSize>${ERROR_MAX_FILE_SIZE}</maxFileSize>
<totalSizeCap>${ERROR_TOTAL_SIZE_CAP}</totalSizeCap>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
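<!-- Ship log events to Kafka via logback-kafka-appender; the topic name combines the environment prefix and the application name -->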
<appender name="KAFKA" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
<topic>${kafka_env}applog_${spring_application_name}</topic>
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
<producerConfig>bootstrap.servers=${kafka_broker}</producerConfig>
<!-- don't wait for a broker to ack the reception of a batch. -->
<producerConfig>acks=0</producerConfig>
<!-- wait up to 1000ms and collect log messages before sending them as a batch -->
<producerConfig>linger.ms=1000</producerConfig>
<!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
<producerConfig>max.block.ms=0</producerConfig>
<!-- Optional parameter to use a fixed partition -->
<!-- <partition>8</partition>-->
</appender>
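<!-- Hand events to the Kafka appender through Logback's AsyncAppender so application threads queue events instead of blocking; with default settings the queue discards TRACE/DEBUG/INFO events once it is 80% full -->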
<appender name="KAFKA_ASYNC" class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="KAFKA" />
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="INFO_FILE"/>
<appender-ref ref="WARN_FILE"/>
<appender-ref ref="ERROR_FILE"/>
<if condition='"true".equals(property("kafka_enabled"))'>
<then>
<appender-ref ref="KAFKA_ASYNC"/>
</then>
</if>
</root>
</configuration>
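Two pieces of Java glue that this file relies on but does not show.

First, the %X{requestId}, %X{requestIp} and %X{userIp} fields in both patterns only print values that the application has put into SLF4J's MDC. A minimal sketch of a servlet filter that could populate them, assuming Servlet API 4.0+ (the filter name and the X-Forwarded-For handling are illustrative, not taken from the original project):

import org.slf4j.MDC;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.util.UUID;

// Hypothetical filter that fills the MDC keys referenced by the log patterns above.
public class MdcEnrichmentFilter implements Filter {

    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        HttpServletRequest http = (HttpServletRequest) request;
        try {
            MDC.put("requestId", UUID.randomUUID().toString());   // correlates all lines of one request
            MDC.put("requestIp", request.getRemoteAddr());        // immediate peer address
            String forwarded = http.getHeader("X-Forwarded-For"); // original client when behind a proxy
            MDC.put("userIp", forwarded != null ? forwarded.split(",")[0].trim() : request.getRemoteAddr());
            chain.doFilter(request, response);
        } finally {
            MDC.clear(); // never leak values into the next request on a pooled thread
        }
    }
}

Second, the two <conversionRule> elements at the top bind %ip and %m to converter classes whose source is not included here. A Logback conversion word is just a class extending ClassicConverter; a hypothetical stand-in for the %ip converter might look like this (the real com.longfor.gaia.gfs.core.utils.IPAddressConverter may resolve the address differently):

import ch.qos.logback.classic.pattern.ClassicConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;

import java.net.InetAddress;
import java.net.UnknownHostException;

// Hypothetical stand-in for the %ip conversion word: renders the local host address.
public class IPAddressConverter extends ClassicConverter {

    // Resolve once; the host IP does not change per event.
    private static final String LOCAL_IP = resolveLocalIp();

    @Override
    public String convert(ILoggingEvent event) {
        return LOCAL_IP;
    }

    private static String resolveLocalIp() {
        try {
            return InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException e) {
            return "unknown";
        }
    }
}

Once such a class is on the classpath, the <conversionRule conversionWord="ip" .../> line above is all that is needed to make %ip usable in any pattern.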