ELK Stack Install Notes
https://kifarunix.com/how-to-install-the-elk-stack-on-rhel-8/  (domain truncated in original notes; kifarunix.com inferred)
https://www.itzgeek.com/how-tos/linux/centos-how-tos/how-to-install-elk-stack-on-rhel-8.html
https://tel4vn.edu.vn/cai-dat-elk-stack-tren-centos-7/
https://www.digitalocean.com/community/tutorials/how-to-install-elasticsearch-logstash-and-kibana-elk-stack-on-ubuntu-14-04
# Increase vm.max_map_count (required by Elasticsearch):
sudo sysctl -w vm.max_map_count=262144   # apply immediately for the running system
sudo vi /etc/sysctl.conf                 # persist across reboots; add the line:
#   vm.max_map_count=262144
sudo sysctl -p                           # reload settings from /etc/sysctl.conf
sudo vi /etc/pam.d/common-session
add: session required pam_limits.so
sudo vi /etc/pam.d/common-session-noninteractive
add: session required pam_limits.so
# Configure Kibana
sudo vi /etc/kibana/kibana.yml
server.host: "0.0.0.0"
server.name: "vnpasuatapv61"
elasticsearch.hosts: ["http://10.191.224.34:9200"]
# Access Kibana -> need create the administrative Kibana user and password, and store them in the htpasswd.users file.
http://10.191.224.34:5601/app/home
#### Install and Configure Logstash. It will act as a centralized log server for your client systems, which run an agent such as Filebeat.
sudo yum -y install logstash
# Configure Logstash
sudo cp /etc/logstash/logstash-sample.conf /etc/logstash/conf.d/logstash.conf
vi /etc/logstash/conf.d/logstash.conf
input {
beats {
port => 5044
}
}
output {
elasticsearch {
hosts => ["http://localhost:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
#user => "elastic"
#password => "changeme"
}
}
sudo vi /etc/filebeat/filebeat.yml
# Under filebeat.inputs: add a log input for the application logs:
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /swlog/integral/app/admin/*.log
    - /swlog/integral/app/ITS/*.log
    - /swlog/integral/app/RS/*.log
# Also enable the output.logstash section (see below).
#setup.template.settings:
#### Comment out the section output.elasticsearch: as we are not going to store logs directly to Elasticsearch
# ---------------------------- Elasticsearch Output ----------------------------
#output.elasticsearch:
# Array of hosts to connect to.
#hosts: ["localhost:9200"]
By default, Filebeat is configured to use default paths for the syslog and authorization logs. In the case of this tutorial,
you do not need to change anything in the configuration. You can see the parameters of the module in the
/etc/filebeat/modules.d/system.yml configuration file.
/etc/filebeat/modules.d/system.yml
sudo vi /etc/filebeat/filebeat.yml
# ================= Configure index lifecycle management (ILM) =================
setup.ilm.overwrite: true
setup.ilm.enabled: false
ilm.enabled: false
# ===============================================
# Configure security for the Elastic Stack #
# ===============================================
sudo systemctl stop logstash
sudo systemctl stop kibana
sudo systemctl stop elasticsearch
sudo vi /etc/elasticsearch/elasticsearch.yml
xpack.security.enabled: true
discovery.type: single-node
cd /usr/share/elasticsearch/bin   # note: "sudo cd" has no effect — cd in a root shell instead
[root@vnpasuatapv61 bin]# ./elasticsearch-setup-passwords interactive
Initiating the setup of passwords for reserved users
elastic,apm_system,kibana,kibana_system,logstash_system,beats_system,remote_monitoring_user.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N]y
# ===============================================================
# Configure Kibana connect to Elasticsearch with a password #
# ===============================================================
sudo vi /etc/kibana/kibana.yml
elasticsearch.username: "kibana_system"
elasticsearch.password: "P@ssword123"
# ===================================================================
# Configure Logstash connect to Elasticsearch with a password #
# ===================================================================
sudo vi /etc/logstash/conf.d/logstash.conf
input {
beats {
port => 5044
}
}
output {
elasticsearch {
hosts => ["http://10.191.224.34:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
user => "logstash_system"      # NOTE: logstash_system is a built-in monitoring-only user and
password => "P@ssword123"      # cannot write indices — this causes the 401 error fixed below (use logstash_internal)
}
}
sudo vi /etc/logstash/logstash.yml
xpack.monitoring.enabled: false
# ===========================================================================================
# FIX ERROR:
# error=>"Got response code '401' contacting Elasticsearch at URL 'http://***:9200/'"
# ===========================================================================================
https://blog.karatos.in/a?ID=01350-fb4f65d8-8189-4c36-82e8-49c14fdefa5e
Configuration in Kibana:
The 401 is a permission problem: the user configured in the Logstash output lacks the index privileges needed to write. Create a dedicated role and user with the right privileges:
Login to Kibana
-> Management: Stack Management
-> Roles:
+ Create role: logstash_writer
+ Cluster privileges: manage_index_templates, monitor, manage_ilm
+ Index privileges:
- Indices: filebeat-*
- Privileges: write, delete, create_index
-> Users:
+ Create user: logstash_internal
+ Password: P@ssword123
+ Privileges: logstash_writer
sudo vi /etc/logstash/conf.d/logstash.conf
output {
elasticsearch {
hosts => ["http://10.191.224.34:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
user => "logstash_internal"
password => "P@ssword123"
}
}
# ===================================================================
# Configure Kibana to connect to Elasticsearch with a password #
# ===================================================================
sudo vi /etc/kibana/kibana.yml
elasticsearch.username: "kibana_system"
elasticsearch.password: "P@ssword123"
Define the username password. Ensure you use the password generated above.
You can securely store the password in Kibana instead of setting it in plain text in the kibana.yml configuration file
using the command;
/usr/share/kibana/bin/kibana-keystore create
/usr/share/kibana/bin/kibana-keystore add elasticsearch.password
When prompted, enter the password for kibana_system user, which is P@ssword123
# ===============================================================
# Add other user with elasticsearch-users login to Kibana #
# ===============================================================
/usr/share/elasticsearch/bin/elasticsearch-users useradd kifarunix -r superuser
Some of the known roles include;
Read more about elasticsearch-users command on Elastic page
https://www.elastic.co/guide/en/elasticsearch/reference/current/users-command.html
cd /usr/share/elasticsearch/bin
[root@vnpasuatapv61 bin]# ./elasticsearch-users useradd shlv -r superuser
Enter new password:
# =======================================
# Changing elasticsearch-users password #
# =======================================
cd /usr/share/elasticsearch/bin
[root@vnpasuatapv61 bin]# ./elasticsearch-users passwd shlv -p P@ssword123
https://www.elastic.co/guide/en/elasticsearch/reference/current/users-command.html
If you use file-based user authentication, the elasticsearch-users command enables you to add and remove users, assign
user roles, and manage passwords per node.
Synopsis:
/usr/share/elasticsearch/bin/elasticsearch-users
([useradd <username>] [-p <password>] [-r <roles>]) |
([list] <username>) |
([passwd <username>] [-p <password>]) |
([roles <username>] [-a <roles>] [-r <roles>]) |
([userdel <username>])
# ===============
# FIX ERROR #
# ===============
Issue: Your data is not secure
Don’t lose one bit. Enable our free security features.
sudo vi /etc/kibana/kibana.yml
Issue: server.publicBaseUrl is missing and should be configured when running in a production environment. Some
features may not behave correctly. See the documentation.
sudo vi /etc/kibana/kibana.yml
server.publicBaseUrl: "http://10.191.224.34:5601"
index: "indexname-%{+YYYY.MM.dd}"   # date in Joda format, matching the %{+YYYY.MM.dd} used elsewhere in these notes
Ex: filebeat-7.17.4-2022.06.13
/var/lib/elasticsearch/nodes/0/indices
============================================= BEGIN TMP =============================================
sudo vi /etc/logstash/conf.d/02-beats-input.conf
input {
beats {
port => 5044
}
}
sudo vi /etc/logstash/conf.d/10-syslog-filter.conf
filter {
  if [fileset][module] == "system" {
    if [fileset][name] == "auth" {
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} %{DATA:[system][auth][ssh][method]} for (invalid user )?%{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]} port %{NUMBER:[system][auth][ssh][port]} ssh2(: %{GREEDYDATA:[system][auth][ssh][signature]})?",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} user %{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: Did not receive identification string from %{IPORHOST:[system][auth][ssh][dropped_ip]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sudo(?:\[%{POSINT:[system][auth][pid]}\])?: \s*%{DATA:[system][auth][user]} :( %{DATA:[system][auth][sudo][error]} ;)? TTY=%{DATA:[system][auth][sudo][tty]} ; PWD=%{DATA:[system][auth][sudo][pwd]} ; USER=%{DATA:[system][auth][sudo][user]} ; COMMAND=%{GREEDYDATA:[system][auth][sudo][command]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} groupadd(?:\[%{POSINT:[system][auth][pid]}\])?: new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} useradd(?:\[%{POSINT:[system][auth][pid]}\])?: new user: name=%{DATA:[system][auth][user][add][name]}, UID=%{NUMBER:[system][auth][user][add][uid]}, GID=%{NUMBER:[system][auth][user][add][gid]}, home=%{DATA:[system][auth][user][add][home]}, shell=%{DATA:[system][auth][user][add][shell]}$",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} %{DATA:[system][auth][program]}(?:\[%{POSINT:[system][auth][pid]}\])?: %{GREEDYMULTILINE:[system][auth][message]}"] }
        pattern_definitions => {
          "GREEDYMULTILINE"=> "(.|\n)*"
        }
        remove_field => "message"
      }
      date {
        match => [ "[system][auth][timestamp]", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
      geoip {
        source => "[system][auth][ssh][ip]"
        target => "[system][auth][ssh][geoip]"
      }
    }
  }
}
# (closing braces restored — the tail of this filter, including any "syslog" fileset branch
#  from the original tutorial, was truncated in these notes; verify against the source tutorial)
sudo vi /etc/logstash/conf.d/30-elasticsearch-output.conf
output {
elasticsearch {
hosts => ["10.191.224.34:9200"]
manage_template => false
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
}
}
============================================= END TMP =============================================