This guide assumes MySQL is already installed.
https://prometheus.io/download/#mysqld_exporter
https://github.com/prometheus/mysqld_exporter
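The exporter binary can be downloaded from either link above. A minimal sketch of that step, assuming release 0.14.0 for linux-amd64 and /opt as the install directory (the same path used in the systemd unit later in this post); the exact URL and version are assumptions, so check the releases page for the current one:

# download and unpack the exporter (version/path are assumptions; pick the latest release for your platform)
cd /opt
wget https://github.com/prometheus/mysqld_exporter/releases/download/v0.14.0/mysqld_exporter-0.14.0.linux-amd64.tar.gz
tar -xzf mysqld_exporter-0.14.0.linux-amd64.tar.gz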
The account created below is used by mysqld_exporter to connect to MySQL.
CREATE USER 'test'@'localhost' IDENTIFIED BY 'test123' WITH MAX_USER_CONNECTIONS 3;
GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'test'@'localhost';
FLUSH PRIVILEGES;
EXIT;
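To confirm the account was created with the intended privileges, the grants can be checked from the shell (a quick check, assuming an administrative account such as root):

# verify the exporter account and its privileges
mysql -u root -p -e "SHOW GRANTS FOR 'test'@'localhost';"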
[client]
socket=/var/run/mysqld/mysqld.sock
user=test
password=test123
[client] : connection settings used by third-party programs that connect to MySQL
Detailed parameters of the client group:
https://dev.mysql.com/doc/refman/8.0/en/mysql-command-options.html
About MySQL Option files
https://dev.mysql.com/doc/refman/8.0/en/option-files.html
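Before wiring this file into the exporter, it is worth confirming that the credentials in the [client] section actually work. A quick check, assuming the snippet above is saved at /etc/mysql/my.cnf (the same path passed to --config.my-cnf in the systemd unit below); adjust the path to wherever you keep the file:

# test the [client] credentials the exporter will use
mysql --defaults-file=/etc/mysql/my.cnf -e "SELECT VERSION();"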
The exporter's runtime flags are defined in the ExecStart parameter.
What each flag does can be checked on GitHub.
Most of the values below are defaults; the configuration is set up to collect as much information as possible.
The my.cnf path and the web listen port are also defined through flags.
[Unit]
Description=Prometheus MySQL Exporter
After=network.target
[Service]
Type=simple
User=prometheus
Group=prometheus
Restart=always
ExecStart=/opt/mysqld_exporter-0.14.0.linux-amd64/mysqld_exporter \
--config.my-cnf=/etc/mysql/my.cnf \
--web.listen-address=0.0.0.0:9104 \
--collect.engine_tokudb_status \
--collect.global_status \
--collect.global_variables \
--collect.info_schema.clientstats \
--collect.info_schema.innodb_metrics \
--collect.info_schema.innodb_tablespaces \
--collect.info_schema.innodb_cmp \
--collect.info_schema.innodb_cmpmem \
--collect.info_schema.processlist \
--collect.info_schema.processlist.min_time=0 \
--collect.info_schema.query_response_time \
--collect.info_schema.replica_host \
--collect.info_schema.tables \
--collect.info_schema.tables.databases='*' \
--collect.info_schema.tablestats \
--collect.info_schema.schemastats \
--collect.info_schema.userstats \
--collect.mysql.user \
--collect.perf_schema.eventsstatements \
--collect.perf_schema.eventsstatements.digest_text_limit=120 \
--collect.perf_schema.eventsstatements.limit=250 \
--collect.perf_schema.eventsstatements.timelimit=86400 \
--collect.perf_schema.eventsstatementssum \
--collect.perf_schema.eventswaits \
--collect.perf_schema.file_events \
--collect.perf_schema.file_instances \
--collect.perf_schema.file_instances.remove_prefix=false \
--collect.perf_schema.indexiowaits \
--collect.perf_schema.memory_events \
--collect.perf_schema.memory_events.remove_prefix=false \
--collect.perf_schema.tableiowaits \
--collect.perf_schema.tablelocks \
--collect.perf_schema.replication_group_members \
--collect.perf_schema.replication_group_member_stats \
--collect.perf_schema.replication_applier_status_by_worker \
--collect.slave_status \
--collect.slave_hosts \
--collect.heartbeat \
--collect.heartbeat.database=heartbeat \
--collect.heartbeat.table=heartbeat \
--collect.heartbeat.utc
[Install]
WantedBy=multi-user.target
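Assuming the unit above is saved as /etc/systemd/system/mysqld_exporter.service (the unit name is an assumption; use whatever name fits your convention), the service can be registered and the metrics endpoint verified as follows:

# reload systemd, start the exporter, and confirm metrics are served on the configured port
sudo systemctl daemon-reload
sudo systemctl enable --now mysqld_exporter
curl -s http://localhost:9104/metrics | grep mysql_up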
MySQL Exporter flag information - GitHub README.md
https://github.com/prometheus/mysqld_exporter
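The full flag list can also be printed directly from the binary; the --help output shows every collector flag and its default value (the path below matches the ExecStart line above):

# list all supported flags and their defaults
/opt/mysqld_exporter-0.14.0.linux-amd64/mysqld_exporter --help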
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "node-prometheus"
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ["192.168.0.1:9090"]
        labels:
          group: 'prometheus'

  - job_name: 'mysql-dbaas hansu test'
    scrape_interval: 5s
    static_configs:
      - targets: ["192.168.0.2:9104"]
        labels:
          group: 'mysql'
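After adding the new scrape job, the configuration can be validated and Prometheus reloaded. A quick check, assuming the file lives at /etc/prometheus/prometheus.yml and Prometheus runs as a systemd service named prometheus (adjust both to your setup):

# validate the config syntax, then reload Prometheus
promtool check config /etc/prometheus/prometheus.yml
sudo systemctl reload prometheus
# alternatively, if Prometheus was started with --web.enable-lifecycle:
# curl -X POST http://localhost:9090/-/reload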