ELK 5.5 MySQL slow log processing (filebeat)

July 15, 2023

First, enable the database slow log; with TABLE output the records are stored as CSV in the mysql schema. slow_query_log: whether the slow query log is enabled, 0 or OFF to disable, 1 or ON to enable; the default is OFF. long_query_time: queries that run at least this long are written to the slow log, with precision down to microseconds; the default is 10s, and a value of 0 logs every query.

Enable the slow log

MariaDB [(none)]> SET GLOBAL slow_query_log = ON;
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> SET long_query_time = 5;
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> SET global log_output = 'FILE,TABLE';
Query OK, 0 rows affected (0.00 sec)
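
Settings changed with SET only last until the server restarts; to persist them across restarts they would go into my.cnf under the [mysqld] section. A minimal sketch (the exact config file location depends on the installation):

[mysqld]
slow_query_log  = 1
long_query_time = 5
log_output      = FILE,TABLE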

Check
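
To confirm the settings took effect, the variables can be inspected first, for example:

MariaDB [(none)]> SHOW VARIABLES LIKE 'slow_query_log';
MariaDB [(none)]> SHOW VARIABLES LIKE 'long_query_time';
MariaDB [(none)]> SHOW VARIABLES LIKE 'log_output';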

Running a query like the following produces a slow-query entry:

MariaDB [mysql]> select sleep(5),USER,PASSWORD,HOST FROM user;
+----------+------+-------------------------------------------+--------------------------+
| sleep(5) | USER | PASSWORD                                  | HOST                     |
+----------+------+-------------------------------------------+--------------------------+
|        0 | root | *6691484EA6B50DDDE1926A220DA01FA9E575C18A | localhost                |
|        0 | root | *6691484EA6B50DDDE1926A220DA01FA9E575C18A | linuxea.com-node98.cluster.com |
|        0 | root | *6691484EA6B50DDDE1926A220DA01FA9E575C18A | 127.0.0.1                |
|        0 | root | *6691484EA6B50DDDE1926A220DA01FA9E575C18A | ::1                      |
+----------+------+-------------------------------------------+--------------------------+
4 rows in set (20.00 sec)

MariaDB [mysql]> 

The record can also be queried from the mysql.slow_log table:

[root@linuxea-Node98 /data/mariadb/mysql]# mysql -uroot -p -h127.0.0.1
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 4
Server version: 10.1.22-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> select * from mysql.slow_log\G
*************************** 1. row ***************************
    start_time: 2017-09-03 12:03:28.557908
     user_host: root[root] @  [127.0.0.1]
    query_time: 00:00:20.000973
     lock_time: 00:00:00.000162
     rows_sent: 4
 rows_examined: 4
            db: mysql
last_insert_id: 0
     insert_id: 0
     server_id: 1
      sql_text: select sleep(5),USER,PASSWORD,HOST FROM user
     thread_id: 3
 rows_affected: 0
1 row in set (0.00 sec)

MariaDB [(none)]> 

The same record is also written to the slow_log.CSV file under the mysql data directory:

[root@linuxea.com-Node98 /data/mariadb/mysql]# cat slow_log.CSV
"2017-09-03 12:03:28.557908","root[root] @  [127.0.0.1]","00:00:20.000973","00:00:00.000162",4,4,"mysql",0,0,1,"select sleep(5),USER,PASSWORD,HOST FROM user",3,0
[root@linuxea.com-Node98 /data/mariadb/mysql]# 

After parsing in Logstash, the result looks roughly like the figure below: mysql-slow.png

Filebeat configuration

The filebeat configuration is as follows; it also includes the earlier nginx input, all on one host:

[root@linuxea.com-Node117 /data/logs]# cat /etc/filebeat/filebeat.yml 
filebeat.prospectors:
 - input_type: log
   paths:
    - /data/logs/access_nginx.log
   document_type: nginx-access-117
 - input_type: log
   paths:
    - /data/logs/slow_log.CSV
   document_type: mysql-slow-117
output.redis:
  hosts: ["10.10.0.98"]
  password: "OTdmOWI4ZTM4NTY1M2M4OTZh"
  key: "default_list"
  db: 5
  timeout: 5
  keys:
    - key: "%{[type]}"
      mappings:
        "nginx-access-117": "nginx-access-117"
        "mysql-slow-117": "mysql-slow-117"

Logstash configuration

This also includes the earlier nginx access log:

[root@linuxea.com-Node49 /etc/logstash/conf.d]# cat redis-output.yml 
input {
    redis {
        host => "10.10.0.98"
        port => "6379"
        key => "nginx-access-117"
        data_type => "list"
        password => "OTdmOWI4ZTM4NTY1M2M4OTZh"
        threads => "5"
        db => "5"
         }
   redis {
        host => "10.10.0.98"
        port => "6379"
        key => "mysql-slow-117"
        data_type => "list"
        password => "OTdmOWI4ZTM4NTY1M2M4OTZh"
        threads => "5"
        db => "5"
        }
    }
filter {
   if [type] == "nginx-access-117" {
    grok {
        patterns_dir => [ "/etc/logstash/patterns.d" ]
        match => { "message" => "%{NGINXACCESS}" }
        overwrite => [ "message" ]
        }
    geoip {
        source => "clent_ip"
        target => "geoip"
#        database => "/etc/logstash/GeoLiteCity.dat"
        database => "/etc/logstash/GeoLite2-City.mmdb"
         }
    useragent {
        source => "User_Agent"
        target => "userAgent"
        }
    urldecode {
        all_fields => true
        }
     mutate {
            gsub => ["User_Agent","["]",""]        #将user_agent中的 " 换成空
            convert => [ "response","integer" ]
            convert => [ "body_bytes_sent","integer" ]
            convert => [ "bytes_sent","integer" ]
            convert => [ "upstream_response_time","float" ]
            convert => [ "upstream_status","integer" ]
            convert => [ "request_time","float" ]
            convert => [ "port","integer" ]
       }
    date {
    match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
        }
        }
    if [type] == "mysql-slow-117" {
    csv {
      columns => [ "timestamp", "user_host", "query_time", "lock_time",
                   "rows_sent", "rows_examined", "db", "last_insert_id",
                   "insert_id", "server_id", "sql_text", "thread_id", "rows_affected" ]
    }
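    # the column order above matches mysql.slow_log / the slow_log.CSV file shown earlier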
    mutate {
      convert => { "rows_sent" => "integer" }
      convert => { "rows_examined" => "integer" }
      convert => { "last_insert_id" => "integer" }
      convert => { "insert_id" => "integer" }
      convert => { "server_id" => "integer" }
      convert => { "thread_id" => "integer" }
      convert => { "rows_affected" => "integer" }
    }
    date {
      match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSSSSS" ]
      remove_field => [ "timestamp" ]
    }
#    mutate { remove_field => [ "message" ] }
    mutate {
      gsub => [
        "query_time", "(.*.)(d)(d)d+", "123",
        "lock_time", "(.*.)(d)(d)d+", "123"
        ]
        }
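     # the gsub above keeps only two decimal places of query_time and lock_time
     # (e.g. 00:00:20.000973 -> 00:00:20.00); the ruby filters below fold the
     # HH:MM:SS.ss strings into a plain number of seconds (20.0) for sorting in Kibana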
     ruby { code => "event.set('query_time' , event.get('query_time') ? event.get('query_time').split(':').inject(0){|a, m| a = a * 60 + m.to_f} : 0)"}
     ruby { code => "event.set('lock_time' , event.get('lock_time') ? event.get('lock_time').split(':').inject(0){|a, m| a = a * 60 + m.to_f} : 0)" }
  }
  }
output {
    if "_grokparsefailure" in [tags] {
    file { path => "/var/log/logstash/grokparsefailure-%{[type]}-%{+YYYY.MM.dd}.log" }
    }
    if [type] == "nginx-access-117" {
    elasticsearch {
        hosts => ["10.0.1.49:9200"]
        index => "logstash-nginx-access-117-%{+YYYY.MM.dd}"
        user => "elastic"
        password => "linuxea"
    }
    }
    if [type] == "mysql-slow-117" {
    elasticsearch {
        hosts => ["10.0.1.49:9200"]
        index => "logstash-mysql-slow-117-%{+YYYY.MM.dd}"
        user => "elastic"
        password => "linuxea"
    }
    }
    stdout {codec => rubydebug}
}

Start

After starting Logstash, check its log for errors: mysql-csv1.jpg. Then add the new index pattern in Kibana and build the visualizations.
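
If Logstash does not come up cleanly, the pipeline configuration can be syntax-checked before restarting the services; a sketch assuming a standard RPM install of Logstash 5.5 and Filebeat:

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis-output.yml --config.test_and_exit
systemctl restart logstash
systemctl restart filebeat

Once data is flowing, the new indices can be confirmed against Elasticsearch with the credentials used in the output section:

curl -u elastic:linuxea 'http://10.0.1.49:9200/_cat/indices/logstash-*?v'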
