logback-spring.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="10 seconds" debug="false">


    <!-- Context name -->
    <contextName>logback</contextName>

    <!-- Log root directory -->
     <!--<property name="log.path" value="C:/logs" />-->
    <springProperty scope="context" name="log.path" source="spring.application.logpath" defaultValue="C:/logs"/>
	
    <springProperty scope="context" name="servicename" source="spring.application.name" defaultValue="UnknownService"/>
    <springProperty scope="context" name="env" source="spring.profiles.active" defaultValue="dev"/>
    <springProperty scope="context" name="serviceport" source="server.port" defaultValue="80"/>

    <!-- The four springProperty entries above must match the corresponding keys in application.yml (a sample yml follows the configuration) -->

    <!-- Output logs to the console -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <encoder>
            <pattern>%yellow(%date{yyyy-MM-dd HH:mm:ss}) |%highlight(%-5level) |%blue(%thread) |%green(%file:%line) |%magenta(%logger) |%cyan(%msg%n)</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- 1. Log file for level=INFO -->
    <appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/info/infolevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
            <maxFileSize>100MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>2GB</totalSizeCap>
        </rollingPolicy>
        <!-- Log level filter -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- Log file output format -->
        <encoder>
            <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} --- %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- 2. Log file for level=WARN -->
    <appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Basic settings -->
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/warn/warnlevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
            <maxFileSize>100MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>2GB</totalSizeCap>
        </rollingPolicy>
        <!-- Log level filter -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>WARN</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- Log file output format -->
        <encoder>
            <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- 3. Log file for level=ERROR -->
    <appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Basic settings -->
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/error/errorlevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
            <maxFileSize>100MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>2GB</totalSizeCap>
        </rollingPolicy>
        <!-- Log level filter -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- Log file output format -->
        <encoder>
            <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
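   <!-- Optional: ship logs to Kafka with logback-kafka-appender; the Maven dependencies this needs are sketched after the configuration. -->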
   <!-- <appender name="KafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder class="com.github.danielwegener.logback.kafka.encoding.LayoutKafkaMessageEncoder">
            <layout class="net.logstash.logback.layout.LogstashLayout" >
                <!– Whether to include the logging context –>
                <includeContext>true</includeContext>
                <!– Whether to include caller data (the log source) –>
                <includeCallerData>true</includeCallerData>
                <!– Custom additional fields –>
                <customFields>{"system":"test"}</customFields>
                <!– Shortened field names –>
                <fieldNames class="net.logstash.logback.fieldnames.ShortenedFieldNames"/>
            </layout>
            <charset>UTF-8</charset>
        </encoder>
        <!–The Kafka topic must match the one in the configuration file, otherwise Kafka will silently ignore your messages–>
        <topic>applog_dev</topic>
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
        <producerConfig>bootstrap.servers=124.71.59.186:9092,139.159.249.142:9092,124.71.85.73:9092</producerConfig>
        <!– don't wait for a broker to ack the reception of a batch.  –>
        <producerConfig>acks=0</producerConfig>
        <!– wait up to 1000ms and collect log messages before sending them as a batch –>
        <producerConfig>linger.ms=1000</producerConfig>
        <!– even if the producer buffer runs full, do not block the application but start to drop messages –>
        <producerConfig>max.block.ms=0</producerConfig>
        <!– define a client-id that you use to identify yourself against the kafka broker –>
        <producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>

         <!– this is the fallback appender if kafka is not available –>
        <appender-ref ref="CONSOLE" />
    </appender>-->

   <!-- <appender name="KafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers class="net.logstash.logback.composite.loggingevent.LoggingEventJsonProviders">
                <pattern>
                    <pattern>
                        {
                        "env": "${env}",
                        "servicename":"${servicename}",
                        "type":"${servicename}",
                        "serviceinfo":"%serviceip:${serviceport}",
                        "date":"%d{yyyy-MM-dd HH:mm:ss.SSS}",
                        "level":"%level",
                        "thread": "%thread",
                        "msg":"%msg",
                        "exception":"%exception"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
        <topic>appdev</topic>
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
        <producerConfig>acks=0</producerConfig>
        <producerConfig>linger.ms=1000</producerConfig>
        <producerConfig>max.block.ms=0</producerConfig>
        <producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>

    </appender>-->

<!--    <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
        <appender-ref ref="KafkaAppender"/>
    </appender>-->

    <!-- Active root logger: applies to every profile (the profile-specific variants below are commented out) -->
    <root>
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="INFO_FILE"/>
        <appender-ref ref="WARN_FILE"/>
        <appender-ref ref="ERROR_FILE"/>
    </root>
  <!-- Activate this configuration for the "dev"/"test" environments:
    <springProfile name="dev,test">
        <root>
            <appender-ref ref="CONSOLE"/>
            <appender-ref ref="INFO_FILE"/>
            <appender-ref ref="WARN_FILE"/>
            <appender-ref ref="ERROR_FILE"/>
            <appender-ref ref="ASYNC"/>
        </root>
    </springProfile>
    <!–Activate this configuration when the environment is not production–>
    <springProfile name="!prod">
        <root>
            <appender-ref ref="CONSOLE"/>
            <appender-ref ref="INFO_FILE"/>
            <appender-ref ref="WARN_FILE"/>
            <appender-ref ref="ERROR_FILE"/>
            <appender-ref ref="ASYNC"/>
        </root>
    </springProfile>
-->
     <!-- springProfile: this logger is used only under the dev profile -->
<!--     <springProfile name="dev">
         <logger name="com.nick" level="INFO" additivity="true">
             <appender-ref ref="KafkaAppender" />
         </logger>
     </springProfile>-->

</configuration>
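
The springProperty entries at the top must line up with application.yml. Here is a minimal sketch of the matching yml, assuming a placeholder service name and port; spring.application.logpath is the custom key this config reads for the log root, while the other three are standard Spring Boot properties:

spring:
  application:
    name: makedata-service   # placeholder name; feeds ${servicename}
    logpath: C:/logs         # custom key; feeds ${log.path}
  profiles:
    active: dev              # feeds ${env}
server:
  port: 8080                 # feeds ${serviceport}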

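The commented-out KafkaAppender blocks rely on two third-party libraries: logback-kafka-appender (the appender itself) and logstash-logback-encoder (the JSON layout/encoder). A minimal Maven sketch follows; the version numbers are illustrative, so check Maven Central for current releases:

<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC2</version> <!-- illustrative version -->
</dependency>
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>6.6</version> <!-- illustrative version -->
</dependency>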
  
