K8s version:
Client Version: v1.28.15
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
Server Version: v1.28.15

logback.xml used to give me a headache on sight. Looking at it again now, I can go line by line and roughly work out what each one does. One of the perks of doing ops for a long time.

Elasticsearch

Using the official Elastic Helm chart for ES:

helm repo add elastic https://helm.elastic.co
helm upgrade --install my-elasticsearch elastic/elasticsearch --version 8.5.1 -n elk -f elasticsearch-values.yaml
elasticsearch-values.yaml
# helm upgrade --install my-elasticsearch elastic/elasticsearch --version 8.5.1 -n elk -f elasticsearch-values.yaml
replicas: 1
minimumMasterNodes: 1

secret:
  enabled: true
  password: "elasticsearch123" # generated randomly if not defined

persistence:
  enabled: true
  labels:
    # Add default labels for the volumeClaimTemplate of the StatefulSet
    enabled: false
  annotations: {}

protocol: https
httpPort: 9200
transportPort: 9300

extraEnvs:
  - name: TZ
    value: "Asia/Shanghai"

Kibana

Using the Bitnami Helm chart here. The official Kibana chart only publishes a single latest version, and it had problems, so I swapped it out (hard to believe the official release doesn't even get self-tested...).

helm repo add bitnami https://charts.bitnami.com/bitnami

helm upgrade --install my-kibana bitnami/kibana --version 10.2.8 -n elk -f kibana-values.yaml

Notes:
1. Adjust the ingress and elasticsearch.hosts settings as needed.
2. Add an elasticsearch-password key to the secret (elasticsearch-master-credentials) that the ES chart auto-created in the previous step; its value is just a copy of the existing password key (see the command sketch below).
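
A minimal sketch for note 2, assuming the secret lives in the elk namespace. Secret data is stored base64-encoded, so the existing value can be copied into the new key as-is:

PW=$(kubectl -n elk get secret elasticsearch-master-credentials -o jsonpath='{.data.password}')
kubectl -n elk patch secret elasticsearch-master-credentials \
  --type merge -p "{\"data\":{\"elasticsearch-password\":\"$PW\"}}"
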
kibana-values.yaml
# helm upgrade --install my-kibana bitnami/kibana --version 10.2.8 -n elk -f kibana-values.yaml
ingress:
  ## @param ingress.enabled Enable ingress controller resource
  ##
  enabled: true
  ## DEPRECATED: Use ingress.annotations instead of ingress.certManager
  ## certManager: false
  ##

  ## @param ingress.pathType Ingress Path type
  ##
  pathType: ImplementationSpecific
  ## @param ingress.apiVersion Override API Version (automatically detected if not set)
  ##
  apiVersion: ""
  ## @param ingress.hostname Default host for the ingress resource. If specified as "*" no host rule is configured
  ##
  hostname: kibana.test.scntsc.com
  ## @param ingress.path The Path to Kibana. You may need to set this to '/*' in order to use this with ALB ingress controllers.
  ##
  path: /
  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
  ## Use this parameter to set the required annotations for cert-manager, see
  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
  ##
  ## e.g:
  ## annotations:
  ##   kubernetes.io/ingress.class: nginx
  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
  ##
  annotations: {}
  ## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
  ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
  ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
  ##
  tls: false
  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
  ##
  selfSigned: false
  ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
  ## extraHosts:
  ## - name: kibana.local
  ##   path: /
  ##
  extraHosts: []
  ## @param ingress.extraPaths Additional arbitrary path/backend objects
  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
  ## extraPaths:
  ## - path: /*
  ##   backend:
  ##     serviceName: ssl-redirect
  ##     servicePort: use-annotation
  ##
  extraPaths: []
  ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## extraTls:
  ## - hosts:
  ##     - kibana.local
  ##   secretName: kibana.local-tls
  ##
  extraTls: []
  ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
  ## -----BEGIN RSA PRIVATE KEY-----
  ##
  ## name should line up with a tlsSecret set further up
  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ## e.g:
  ## - name: kibana.local-tls
  ##   key:
  ##   certificate:
  ##
  secrets: []
  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
  ##
  ingressClassName: "nginx"
  ## @param ingress.extraRules The list of additional rules to be added to this ingress record. Evaluated as a template
  ## Useful when looking for additional customization, such as using different backend
  ##
  extraRules: []

elasticsearch:
  ## @param elasticsearch.hosts List of elasticsearch hosts to connect to.
  ## e.g:
  ## hosts:
  ##   - elasticsearch-1
  ##   - elasticsearch-2
  ##
  hosts:
    - elasticsearch-master
  ## @param elasticsearch.port Elasticsearch port
  ##
  port: "9200"
  security:
    auth:
      ## @param elasticsearch.security.auth.enabled Set to 'true' if Elasticsearch has authentication enabled
      ##
      enabled: true
      ## @param elasticsearch.security.auth.kibanaPassword Password of the 'kibana_system' user, used to authenticate Kibana connection with Elasticsearch.
      ##
      kibanaPassword: "123456"
      ## @param elasticsearch.security.auth.existingSecret Name of the existing secret containing the password for the 'kibana_system' user.
      ##
      existingSecret: ""
      ## @param elasticsearch.security.auth.createSystemUser If enabled, Kibana will use Elasticsearch API to create the 'kibana_system' user at startup.
      ##
      createSystemUser: true
      ## @param elasticsearch.security.auth.elasticsearchPasswordSecret Name of the existing secret containing the password for the 'elastic' user.
      ## Required if createSystemUser=true. The secret must contain the key 'elasticsearch-password'.
      ##
      elasticsearchPasswordSecret: "elasticsearch-master-credentials"
    tls:
      ## @param elasticsearch.security.tls.enabled Set to 'true' if Elasticsearch API uses TLS/SSL (HTTPS)
      ##
      enabled: true
      ## @param elasticsearch.security.tls.verificationMode Verification mode for SSL communications.
      ## Supported values: full, certificate, none.
      ## Ref: https://www.elastic.co/guide/en/kibana/7.x/settings.html#elasticsearch-ssl-verificationmode
      # verificationMode: "full"
      verificationMode: "none"
      ## @param elasticsearch.security.tls.existingSecret Name of the existing secret containing Elasticsearch Truststore or CA certificate. Required unless verificationMode=none
      ##
      existingSecret: ""
      ## @param elasticsearch.security.tls.usePemCerts Set to 'true' to use PEM certificates instead of PKCS12.
      ##
      usePemCerts: false
      ## @param elasticsearch.security.tls.truststorePassword Password to access the PKCS12 trustore in case it is password-protected.
      ##
      truststorePassword: ""
      ## @param elasticsearch.security.tls.passwordsSecret Name of a existing secret containing the Truststore password
      ##
      passwordsSecret: ""
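
With the ingress above, Kibana is reachable at kibana.test.scntsc.com. If DNS isn't wired up yet, a port-forward works for a quick check; a sketch assuming the Bitnami chart's default service name and port:

kubectl -n elk port-forward svc/my-kibana 5601:5601
# then open http://localhost:5601 and log in as 'elastic' with the password from the ES values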

Logstash

Using the official Elastic Helm chart for Logstash:

helm repo add elastic https://helm.elastic.co

helm upgrade --install my-logstash elastic/logstash --version 8.5.1 -n elk -f logstash-values.yaml

Note: the logstash.conf and logstash.yml below are custom configurations.
logstash-values.yaml
# helm upgrade --install my-logstash elastic/logstash --version 8.5.1 -n elk -f logstash-values.yaml
---
replicas: 1

# Allows you to add any config files in /usr/share/logstash/config/
# such as logstash.yml and log4j2.properties
#
# Note that when overriding logstash.yml, `http.host: 0.0.0.0` should always be included
# to make default probes work.
#logstashConfig: {}
logstashConfig:
  logstash.yml: |
    http.host: "0.0.0.0"
    http.port: 9600
    xpack.monitoring.enabled: true
    xpack.monitoring.elasticsearch.hosts: ["https://elasticsearch-master:9200"]
    xpack.monitoring.elasticsearch.username: "elastic"
    xpack.monitoring.elasticsearch.password: "elasticsearch123"
    xpack.monitoring.elasticsearch.ssl.verification_mode: "none"
#  logstash.yml: |
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value

# Allows you to add any pipeline files in /usr/share/logstash/pipeline/
### ***warn*** there is a hardcoded logstash.conf in the image, override it first
logstashPipeline:
  logstash.conf: |
    input {
      beats {
        port => 5044
        type => "beats"
      }
      http {
        port => 8880
        type => "http"
      }
      tcp {
        port => 5050
        host => "0.0.0.0"
        type => "all"
        codec => json_lines
      }
      tcp {
        port => 5051
        mode => "server"
        host => "0.0.0.0"
        type => "springcloud"
        codec => json_lines
      }
    }
    filter {
      if [host] and [host][ip] {
        mutate {
          replace => { "host" => "%{[host][ip]}" }
        }
      }

      mutate {
        lowercase => ["APP_NAME"]
      }
    }
    output {
      if [type] == "springcloud" {
        elasticsearch {
          hosts => ["https://elasticsearch-master:9200"]
          user => "elastic"
          password => "elasticsearch123"
          index => "springcloud-%{[APP_NAME]}-%{+YYYY.MM.dd}"
          # ssl => false
          ssl_certificate_verification => false
        }
      } else {
        elasticsearch {
          hosts => ["https://elasticsearch-master:9200"]
          user => "elastic"
          password => "elasticsearch123"
          index => "logstash-%{type}-%{+YYYY.MM.dd}"
          # ssl => false
          ssl_certificate_verification => false
        }
      }
    }
#  logstash.conf: |
#    input {
#      exec {
#        command => "uptime"
#        interval => 30
#      }
#    }
#    output { stdout { } }

# Allows you to add any pattern files in your custom pattern dir
logstashPatternDir: "/usr/share/logstash/patterns/"
logstashPattern: {}
#    pattern.conf: |
#      DPKG_VERSION [-+~<>\.0-9a-zA-Z]+

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
# extraEnvs: []
extraEnvs:
  - name: TZ
    value: "Asia/Shanghai"
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map

# Add sensitive data to k8s secrets
secrets: []
#  - name: "env"
#    value:
#      ELASTICSEARCH_PASSWORD: "LS1CRUdJTiBgUFJJVkFURSB"
#      api_key: ui2CsdUadTiBasRJRkl9tvNnw
#  - name: "tls"
#    value:
#      ca.crt: |
#        LS0tLS1CRUdJT0K
#        LS0tLS1CRUdJT0K
#        LS0tLS1CRUdJT0K
#        LS0tLS1CRUdJT0K
#      cert.crt: "LS0tLS1CRUdJTiBlRJRklDQVRFLS0tLS0K"
#      cert.key.filepath: "secrets.crt" # The path to file should be relative to the `values.yaml` file.

# A list of secrets and their paths to mount inside the pod
secretMounts: []

hostAliases: []
#- ip: "127.0.0.1"
#  hostnames:
#  - "foo.local"
#  - "bar.local"

image: "docker.elastic.co/logstash/logstash"
imageTag: "8.5.1"
imagePullPolicy: "IfNotPresent"
imagePullSecrets: []

podAnnotations: {}

# additionals labels
labels: {}

logstashJavaOpts: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "100m"
    memory: "1536Mi"
  limits:
    cpu: "1000m"
    memory: "1536Mi"

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi

rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
  annotations:
    {}
    #annotation1: "value1"
    #annotation2: "value2"
    #annotation3: "value3"

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: false
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim

persistence:
  enabled: false
  annotations: {}

extraVolumes:
  []
  # - name: extras
  #   emptyDir: {}

extraVolumeMounts:
  []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true

extraContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

extraInitContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
nodeAffinity: {}

# This is inter-pod affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
podAffinity: {}

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

httpPort: 9600

# Custom ports to add to logstash
extraPorts:
  []
  # - name: beats
  #   containerPort: 5001

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for logstash to stop gracefully
terminationGracePeriod: 120

# Probes
# Default probes are using `httpGet` which requires that `http.host: 0.0.0.0` is part of
# `logstash.yml`. If needed probes can be disabled or overridden using the following syntaxes:
#
# disable livenessProbe
# livenessProbe: null
#
# replace httpGet default readinessProbe by some exec probe
# readinessProbe:
#   httpGet: null
#   exec:
#     command:
#       - curl
#       - localhost:9600

livenessProbe:
  httpGet:
    path: /
    port: http
  initialDelaySeconds: 300
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 3
  successThreshold: 1

readinessProbe:
  httpGet:
    path: /
    port: http
  initialDelaySeconds: 60
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 3
  successThreshold: 3

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

nodeSelector: {}
# tolerations: []
tolerations:
  - key: "storage"
    operator: "Equal"
    value: "slow"
    effect: "NoSchedule"

nameOverride: ""
fullnameOverride: ""

lifecycle:
  {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]

service:
  {}
  # annotations: {}
  # type: ClusterIP
  # loadBalancerIP: ""
  # ports:
  #   - name: beats
  #     port: 5044
  #     protocol: TCP
  #     targetPort: 5044
  #   - name: http
  #     port: 8080
  #     protocol: TCP
  #     targetPort: 8080

ingress:
  enabled: false
  annotations:
    {}
    # kubernetes.io/tls-acme: "true"
  className: "nginx"
  pathtype: ImplementationSpecific
  hosts:
    - host: logstash-example.local
      paths:
        - path: /beats
          servicePort: 5044
        - path: /http
          servicePort: 8080
  tls: []
  #  - secretName: logstash-example-tls
  #    hosts:
  #      - logstash-example.local
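
Before pointing applications at it, it's worth smoke-testing the tcp/5051 input. A hedged sketch using a throwaway busybox pod; the pod DNS name matches the destination used in logback.xml below:

kubectl -n elk run logstash-smoke --rm -it --restart=Never --image=busybox -- \
  sh -c 'echo "{\"APP_NAME\":\"DEMO\",\"message\":\"smoke test\"}" | nc my-logstash-logstash-0.my-logstash-logstash-headless.elk.svc.cluster.local 5051'

The pipeline tags tcp/5051 events as springcloud and lowercases APP_NAME, so this event should land in an index named springcloud-demo-<date>.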

logback.xml configuration

Key points:
In the LOGSTASH appender, customize the destination as needed.
Under springProfile, add <appender-ref ref="LOGSTASH"/>.
logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <springProperty name="APP_NAME" scope="context" source="spring.application.name"/>
    <springProperty name="LOG_FILE" scope="context" source="logging.file" defaultValue="./logs/app/${APP_NAME}"/>
    <springProperty name="LOG_TRACK_FILE" scope="context" source="logging.file" defaultValue="./logs/track"/>
    <springProperty name="LOG_AUDIT_FILE" scope="context" source="logging.file" defaultValue="./logs/audit"/>
    <springProperty name="LOG_MAXFILESIZE" scope="context" source="logback.filesize" defaultValue="50MB"/>
    <springProperty name="LOG_FILEMAXDAY" scope="context" source="logback.filemaxday" defaultValue="7"/>
    <springProperty name="ServerIP" scope="context" source="spring.cloud.client.ip-address" defaultValue="0.0.0.0"/>
    <springProperty name="ServerPort" scope="context" source="server.port" defaultValue="0000"/>
    <!-- contextName must come after APP_NAME is declared, otherwise it resolves to APP_NAME_IS_UNDEFINED -->
    <contextName>${APP_NAME}</contextName>

    <!-- Colored logging -->
    <!-- Converter classes that colored logging depends on -->
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex"
                    converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx"
                    converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>

    <!-- Colored log pattern -->
    <property name="CONSOLE_LOG_PATTERN"
              value="[${APP_NAME}:${ServerIP}:${ServerPort}] %clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(%level){blue} %clr(${PID}){magenta} %clr([%X{traceId}]){yellow} %clr([%thread]){orange} %clr(%logger){cyan} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
    <property name="CONSOLE_LOG_PATTERN_NO_COLOR"
              value="[${APP_NAME}:${ServerIP}:${ServerPort}] %d{yyyy-MM-dd HH:mm:ss.SSS} %level ${PID} [%X{traceId}] [%thread] %logger %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>

    <!-- 🔥 Ship logs to Logstash (JSON format) -->
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>my-logstash-logstash-0.my-logstash-logstash-headless.elk.svc.cluster.local:5051</destination>
<!--        <destination>host.test.scntsc.com:31540</destination>-->
        <encoder class="net.logstash.logback.encoder.LogstashEncoder">
<!--            <customFields>{"app_name": "${APP_NAME}", "server_ip": "${ServerIP}", "server_port": "${ServerPort}"}</customFields>-->
        </encoder>
    </appender>
    <!-- Console logging -->
    <appender name="StdoutAppender" class="ch.qos.logback.core.ConsoleAppender">
        <withJansi>true</withJansi>
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <!-- Roll the regular log file daily -->
    <appender name="FileAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_FILE}/${APP_NAME}.log</file>
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN_NO_COLOR}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <!-- Time-based rolling policy -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_FILE}/${APP_NAME}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- Retention period, in days -->
            <maxHistory>${LOG_FILEMAXDAY}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${LOG_MAXFILESIZE}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
        </filter>
    </appender>

    <appender name="track_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_TRACK_FILE}/track.log</file>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS}|${APP_NAME}|%msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <!-- Time-based rolling policy -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_TRACK_FILE}/track.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- Retention period, in days -->
            <maxHistory>${LOG_FILEMAXDAY}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${LOG_MAXFILESIZE}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
    </appender>

    <appender name="audit_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_AUDIT_FILE}/audit.log</file>
        <encoder>
            <pattern>%msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <!-- Time-based rolling policy -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_AUDIT_FILE}/audit.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- Retention period, in days -->
            <maxHistory>${LOG_FILEMAXDAY}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${LOG_MAXFILESIZE}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
    </appender>

    <appender name="track_log_async" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <appender-ref ref="track_log"/>
    </appender>
    <appender name="file_async" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <appender-ref ref="FileAppender"/>
    </appender>
    <appender name="audit_log_async" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <appender-ref ref="audit_log"/>
    </appender>

    <logger name="org.mate.core.log.util" level="debug" addtivity="false">
        <appender-ref ref="track_log_async"/>
    </logger>
    <logger name="com.scnyw.system.service.impl.SysLogServiceImpl" level="debug" addtivity="false">
        <appender-ref ref="audit_log_async"/>
    </logger>

    <!-- MyBatis log configure -->
    <logger name="org.apache.ibatis" level="INFO"/>
    <logger name="org.mybatis.spring" level="DEBUG"/>
    <logger name="java.sql.Connection" level="DEBUG"/>
    <logger name="java.sql.Statement" level="DEBUG"/>
    <logger name="java.sql.PreparedStatement" level="DEBUG"/>
    <logger name="org.hibernate.validator.internal.util.Version" level="off" />

    <!-- Emit the project's debug logs, including MyBatis SQL logs -->
    <logger name="org.mate.**.mapper" level="INFO" />

    <springProfile name="local,dev,test,prod,docker">
        <root level="INFO">
            <appender-ref ref="StdoutAppender"/>
<!--            <appender-ref ref="file_async"/>-->
            <appender-ref ref="LOGSTASH"/> <!-- 🔥 添加 Logstash -->
        </root>
    </springProfile>
</configuration>
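
The LOGSTASH appender requires the logstash-logback-encoder library on the application's classpath. Once an app with this logback.xml is running, the new indices should show up in ES; a quick check sketch, reusing the exec pattern from above:

kubectl -n elk exec elasticsearch-master-0 -- \
  curl -sk -u "elastic:elasticsearch123" "https://localhost:9200/_cat/indices/springcloud-*?v"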
