

두 가지 방법이 있다
여러 가지 이유로 후자가 합리적으로 보인다.
ES와 키바나는 이미 연결되어있다고 가정
도커 및 도커 컴포즈를 사용한다
JDBC connector는 직접 다운로드해야 하지만, JDBC input plugin은 이미지를 다운받으면 자동으로 설정된다
ES와 Logstash를 연결하기 위해선 ES와 Kibana를 연결할 때 사용했던 인증서가 필요하다
나는 yml이나 conf같은 설정파일들은 로컬에서 작성하고 도커 컨테이너가 만들어질 때 복사하도록 설정했다 (컨테이너에 직접 들어가서 파일을 만들어도 됨)
# docker-compose service: Logstash is built from a local Dockerfile
# (for the JDBC driver + certs) but tagged with the official image name.
logstash:
  image: docker.elastic.co/logstash/logstash:8.17.1
  container_name: logstash
  build:
    # context is required alongside `dockerfile`; "." = directory of this compose file
    context: .
    dockerfile: Dockerfile-logstash
  ports:
    # quote port mappings so YAML never misreads them (sexagesimal trap)
    - "5044:5044"
input {
  jdbc {
    # Connector/J jar is copied into this exact path by Dockerfile-logstash
    jdbc_driver_library => "/usr/share/logstash/logstash-core/lib/jars/mysql-connector-java-8.0.30.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    # FIX: the Connector/J property is `autoReconnect` — `autoReconnection`
    # is not a recognized option and was being ignored.
    jdbc_connection_string => "jdbc:mysql://mysql:3306/picky?useSSL=false&useUnicode=true&characterEncoding=UTF-8&autoReconnect=true"
    jdbc_user => "유저명"
    jdbc_password => "비밀번호"
    # Incremental sync: track the row's UNIX timestamp so each run only
    # pulls rows whose updated_at is newer than :sql_last_value.
    use_column_value => true
    tracking_column => "unix_ts_in_secs"
    tracking_column_type => "numeric"
    # 6-field cron: run every 10 seconds
    schedule => "*/10 * * * * *"
    # `updated_at < NOW()` guards against rows committed mid-query reappearing
    statement => "SELECT *, UNIX_TIMESTAMP(updated_at) AS unix_ts_in_secs FROM movie WHERE (UNIX_TIMESTAMP(updated_at) > :sql_last_value AND updated_at < NOW()) ORDER BY updated_at ASC"
    # persists :sql_last_value across container restarts
    last_run_metadata_path => "/usr/share/logstash/.logstash_jdbc_last_run"
  }
}
filter {
# Stash the MySQL primary key in @metadata so the output can use it as the
# ES document id; @metadata fields are never indexed into the document.
mutate {
copy => { "id" => "[@metadata][movieId]" }
}
# Parse the MySQL DATETIME(6)-style string into a proper Logstash timestamp,
# interpreting wall-clock values as KST.
# NOTE(review): assumes updated_at arrives as a string in this exact format —
# verify against what the jdbc input actually emits for this column type.
date {
match => ["updated_at", "yyyy-MM-dd HH:mm:ss.SSSSSS"]
target => "updated_at"
timezone => "Asia/Seoul"
}
# Drop fields we no longer need: `id` (already copied to @metadata),
# Logstash's internal @version, and the tracking helper column.
mutate {
remove_field => ["id", "@version", "unix_ts_in_secs"]
}
}
output {
  # Debug aid: dump every event to the container log
  stdout { codec => rubydebug }

  elasticsearch {
    hosts => ["https://es01:9200"]
    # CA cert shared with the ES/Kibana setup, baked in by Dockerfile-logstash
    ssl_certificate_authorities => ["/usr/share/logstash/config/certs/es01/es01.crt"]
    # FIX: credentials must be quoted strings — bare words are invalid
    # Logstash config syntax and fail at pipeline compile time.
    user => "유저명"
    password => "비밀번호"
    index => "connector-movie"
    # Reuse the MySQL primary key so re-synced rows overwrite in place
    # instead of creating duplicate documents.
    document_id => "%{[@metadata][movieId]}"
  }
}
# logstash.yml — node-level settings (copied into the image by Dockerfile-logstash)
api.http.host: "0.0.0.0"

# Ship Logstash's own monitoring data to the secured ES node
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.hosts: ["https://es01:9200"]
xpack.monitoring.elasticsearch.ssl.certificate_authority: "/usr/share/logstash/config/certs/es01/es01.crt"
# quote the placeholders so YAML always treats them as strings
xpack.monitoring.elasticsearch.username: "유저명"
xpack.monitoring.elasticsearch.password: "비밀번호"

# FIX: no space before the colon — `path.config :` is not accepted by the
# settings parser. Relative path resolves under /usr/share/logstash.
path.config: "pipeline"
FROM docker.elastic.co/logstash/logstash:8.17.1

# root is needed to install the JDBC driver and fix file ownership;
# we drop back to the logstash user at the end.
USER root

# Install MySQL Connector/J where the jdbc input plugin's
# jdbc_driver_library setting expects it.
# FIX: curl -O downloads the tarball into the current working directory
# (/usr/share/logstash), not /tmp — the original `rm -rf /tmp/...tar.gz`
# removed nothing and left the tarball inside the image.
RUN mkdir -p /usr/share/logstash/logstash-core/lib/jars/ && \
    curl -L -O https://downloads.mysql.com/archives/get/p/3/file/mysql-connector-java-8.0.30.tar.gz && \
    tar -xzf mysql-connector-java-8.0.30.tar.gz -C /tmp && \
    cp /tmp/mysql-connector-java-8.0.30/mysql-connector-java-8.0.30.jar /usr/share/logstash/logstash-core/lib/jars/ && \
    chown logstash:logstash /usr/share/logstash/logstash-core/lib/jars/mysql-connector-java-8.0.30.jar && \
    chmod 644 /usr/share/logstash/logstash-core/lib/jars/mysql-connector-java-8.0.30.jar && \
    rm -rf mysql-connector-java-8.0.30.tar.gz /tmp/mysql-connector-java-8.0.30

# Copy locally-authored config, pipelines, and the ES CA certificate.
# COPY creates the target directories itself, so no mkdir is needed.
COPY ./logstash/config /usr/share/logstash/config
COPY ./logstash/pipeline /usr/share/logstash/pipeline
COPY ./certs /usr/share/logstash/config/certs

# COPY writes files as root; fix ownership once, after all COPYs
# (chowning before the COPY, as the original did, was redundant).
RUN chown -R logstash:logstash /usr/share/logstash/config/certs/ && \
    chmod -R 755 /usr/share/logstash/config/certs/

USER logstash


ES와 MySQL의 자료 개수가 똑같다. (movie)

필요했던 데이터들이 잘 들어와있다. 3개의 필드만 보이는데 열면 12개의 필드가 더 있다.
