Architecture Journey - Extension - Service - Logging

Overview

This article uses Logback to provide logging for service/data, with three output channels: CONSOLE, FILE, and KAFKA.

Source Code

iarc-service

Environment

IDE: IntelliJ IDEA
JDK: 1.8.0_162
Framework: Spring Boot, Logback
Middleware: Kafka, ZooKeeper

Development

Configuration

pom.xml

  • parent

    <properties>
        ...
        <janino.version>2.5.16</janino.version>
        <kafka-logback.version>1.0.2</kafka-logback.version>
    </properties>
    ...
    <dependencies>
        ...
        <!-- kafka logback -->
        <dependency>
            <groupId>ru.sberned</groupId>
            <artifactId>kafka-logback</artifactId>
            <version>${kafka-logback.version}</version>
        </dependency>

        <!-- required by Logback's EvaluatorFilter -->
        <dependency>
            <groupId>org.codehaus.janino</groupId>
            <artifactId>janino</artifactId>
            <version>${janino.version}</version>
        </dependency>
    </dependencies>
  • iarc-service

    <!-- janino -->
    <dependency>
        <groupId>org.codehaus.janino</groupId>
        <artifactId>janino</artifactId>
    </dependency>

    <!-- kafka logback -->
    <dependency>
        <groupId>ru.sberned</groupId>
        <artifactId>kafka-logback</artifactId>
    </dependency>

application.properties

# kafka
kafka.bootstrapServers=192.168.56.103:9092

# log file (Spring Boot exposes this to Logback as LOG_FILE, used by the FILE appender)
logging.file=/var/log/iarc/iarc_service_data

logback-spring.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Pull settings out of application.properties -->
    <springProperty scope="context" name="kafkaBootstrapServers" source="kafka.bootstrapServers"/>
    <!-- For other settings see https://github.com/spring-projects/spring-boot/tree/master/spring-boot/src/main/resources/org/springframework/boot/logging/logback -->
    <!-- Pull in Spring Boot's default Logback settings -->
    <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
    <!-- Quiet down noisy framework loggers -->
    <logger name="org.springframework" level="WARN"/>
    <logger name="com.alibaba.dubbo" level="WARN"/>
    <logger name="org.apache.zookeeper" level="WARN"/>

    <!-- Console appender: same as Spring Boot's default, but without the charset setting,
         which garbles output when running under Tomcat on Windows -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
        </encoder>
    </appender>

    <!-- File appender: only messages containing "javax.BusinessException" get through -->
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.core.filter.EvaluatorFilter">
            <evaluator> <!-- defaults to type ch.qos.logback.classic.boolex.JaninoEventEvaluator -->
                <expression>return message.contains("javax.BusinessException");</expression>
            </evaluator>
            <onMismatch>DENY</onMismatch>
            <onMatch>NEUTRAL</onMatch>
        </filter>
        <!-- SizeAndTimeBasedRollingPolicy combines daily rollover with a size cap;
             a TimeBasedRollingPolicy plus a separate SizeBasedTriggeringPolicy does
             not work, because TimeBasedRollingPolicy is its own triggering policy -->
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- one file per day; %i distinguishes the size-based parts within a day -->
            <fileNamePattern>${LOG_FILE}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- maximum size of each log file -->
            <maxFileSize>10MB</maxFileSize>
            <!-- days of log history to keep -->
            <maxHistory>30</maxHistory>
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Async wrapper around the file appender -->
    <appender name="asyncFileAppender" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>50</queueSize>
        <appender-ref ref="FILE"/>
    </appender>

    <!-- Kafka appender -->
    <appender name="KAFKA" class="ru.sberned.kafkalogback.KafkaAppender">
        <topic>iarc-log</topic>
        <bootstrapServers>${kafkaBootstrapServers}</bootstrapServers>
        <valueSerializer>org.apache.kafka.common.serialization.StringSerializer</valueSerializer>
        <failOnStartup>false</failOnStartup>
        <!-- extra producer properties, in key|value form -->
        <customProp>acks|all</customProp>
        <layout class="ch.qos.logback.classic.PatternLayout">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level mgt %logger{50} - %msg%n</pattern>
        </layout>
    </appender>
    <!-- Async wrapper around the KAFKA appender -->
    <appender name="KafkaAsync" class="ch.qos.logback.classic.AsyncAppender">
        <neverBlock>true</neverBlock>
        <includeCallerData>true</includeCallerData>
        <appender-ref ref="KAFKA"/>
    </appender>
    <!-- Keep the kafka client's own logs away from the KAFKA appender,
         which would otherwise create a feedback loop -->
    <logger name="org.apache.kafka" level="INFO" additivity="false">
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="FILE"/>
    </logger>

    <!-- Production: write to file and Kafka only, no console -->
    <springProfile name="prd">
        <root level="INFO">
            <appender-ref ref="asyncFileAppender"/>
            <appender-ref ref="KafkaAsync"/>
        </root>
    </springProfile>

    <!-- Default (development): console output as well -->
    <springProfile name="default">
        <root level="INFO">
            <appender-ref ref="asyncFileAppender"/>
            <appender-ref ref="KafkaAsync"/>
            <appender-ref ref="CONSOLE"/>
        </root>
    </springProfile>
</configuration>
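
At startup Janino compiles the expression string into an evaluator. For reference, a plain-Java filter with the same behavior would look like the sketch below; the class name is made up, and wiring it in via its fully qualified name in the filter's class attribute would remove the need for the janino dependency.

import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.filter.Filter;
import ch.qos.logback.core.spi.FilterReply;

// Equivalent of the EvaluatorFilter above: only events whose message
// contains "javax.BusinessException" may reach the FILE appender.
public class BusinessExceptionFilter extends Filter<ILoggingEvent> {

    @Override
    public FilterReply decide(ILoggingEvent event) {
        if (event.getMessage() != null
                && event.getMessage().contains("javax.BusinessException")) {
            return FilterReply.NEUTRAL; // OnMatch = NEUTRAL: defer to later filters
        }
        return FilterReply.DENY;        // OnMismatch = DENY: drop the event
    }
}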

Code

  • UserController
    public List<User> getAllUsers() {
        // passes the FILE appender's filter (contains the marker string)
        logger.warn("javax.BusinessException: complex getAllUsers");
        // reaches the console and Kafka, but is filtered out of the file log
        logger.warn("simple getAllUsers");
        return userService.getAllUsers();
    }
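
For context, a minimal sketch of the full controller: the @RequestMapping/@GetMapping paths are inferred from the URL used in the debugging step below, while the field-injected UserService is an assumption based on the rest of the series.

import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/user")
public class UserController {

    private static final Logger logger = LoggerFactory.getLogger(UserController.class);

    @Autowired
    private UserService userService;

    // Emits one message that passes the FILE filter and one that does not.
    @GetMapping("/getAllUsers")
    public List<User> getAllUsers() {
        logger.warn("javax.BusinessException: complex getAllUsers");
        logger.warn("simple getAllUsers");
        return userService.getAllUsers();
    }
}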

Kafka

  • Create a topic for the log messages
    $ /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic iarc-log
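
Alternatively, the topic can be created programmatically. A minimal sketch using the AdminClient API from kafka-clients (available since Kafka 0.11); the class name is illustrative and the broker address is the one from application.properties:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateLogTopic {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.56.103:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // Same settings as the CLI command: 1 partition, replication factor 1.
            NewTopic topic = new NewTopic("iarc-log", 1, (short) 1);
            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}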

Debugging

Start iarc-service

Visit http://localhost:8001/user/getAllUsers. The console shows both log lines, while the file C:\var\log\iarc\iarc_service_data.***.log contains only one: the EvaluatorFilter on the FILE appender denies every message that does not contain "javax.BusinessException", so only the first warn call reaches the file.

Check the Kafka logs

Both log lines show up there:

$ cd /tmp/kafka-logs/iarc-log-0
$ tail 00000000000000000000.log
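
Tailing the segment file works, but it shows the records with their binary framing. A plain consumer prints the formatted log lines directly; here is a minimal sketch, assuming kafka-clients 2.0+ for the Duration-based poll, with an arbitrary group id and a hypothetical class name:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class LogTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.56.103:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "iarc-log-debug");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("iarc-log"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value()); // each value is one formatted log line
                }
            }
        }
    }
}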

kafka-manager

kafka-manager can be set up by following the steps here; it makes Kafka much easier to manage.

Deployment

Deployment follows the same process as the other articles in this series, so it is not repeated here.
