Publication of the configuration.

jeltz 2020-09-19 21:39:57 +02:00
commit 56d81f0344
50 changed files with 1072 additions and 0 deletions

2
.gitignore vendored Normal file

@ -0,0 +1,2 @@
loot
!/**/.gitkeep

4
README.md Normal file

@ -0,0 +1,4 @@
# Aurore log centralization
Ansible configuration for the test machines.

21
base.yml Normal file

@ -0,0 +1,21 @@
---
- hosts: all
roles:
- base-common
- rsyslog-common
- hosts: collector.aurore.local
roles:
- rsyslog-collector
- hosts: elastic.aurore.local
roles:
- elasticsearch
- logstash
- redis
- kibana
- hosts: pki.aurore.local
roles:
- openssl-ca
...
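
For reference, a run of this playbook against the inventory published in this commit would presumably be launched as follows (assuming Ansible on the control machine and working SSH access to the *.aurore.local hosts):

ansible-playbook -i hosts base.yml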

4
group_vars/all.yml Normal file

@ -0,0 +1,4 @@
---
ansible_python_interpreter: /usr/bin/python3
default_locale: en_US.UTF-8
...


@ -0,0 +1,6 @@
---
rsyslog_outputs:
- proto: relp
address: 10.42.0.29
port: 2514
...


@ -0,0 +1,13 @@
---
rsyslog_inputs:
- proto: relp
address: 0.0.0.0
port: 2514
rsyslog_collector_base_dir: /var/log/remote
rsyslog_outputs:
- proto: redis
address: 10.42.0.35
port: 6379
password: "P@ssw0rd!"
key: logstash
...


@ -0,0 +1,35 @@
---
# Test configuration for ELK + Redis (receives the logs shipped by the rsyslog collector)
elasticsearch_address: 127.0.0.1
elasticsearch_port: 9200
elasticsearch_users:
logstash_internal:
password: "P@ssw0rd!"
roles:
- logstash_ingest
jeltz:
password: "P@ssw0rd!"
roles:
- superuser
# TODO: user for Kibana
redis_address: 0.0.0.0
redis_port: 6379
redis_password: "P@ssw0rd!"
redis_unix_socket: /run/redis/redis.sock
logstash_redis_path: "{{ redis_unix_socket }}"
logstash_redis_password: "{{ redis_password }}"
logstash_redis_key: logstash
logstash_es_host: "http://127.0.0.1:{{ elasticsearch_port }}"
logstash_es_username: jeltz
logstash_es_password: "{{ elasticsearch_users[logstash_es_username].password }}"
kibana_address: 0.0.0.0
kibana_port: 5601
kibana_encryption_key: "VerySecretEncryptionKeyThatNoOneWillEverGuess"
kibana_es_host: "http://127.0.0.1:{{ elasticsearch_port }}"
kibana_es_username: jeltz
kibana_es_password: "{{ elasticsearch_users[kibana_es_username].password }}"
...


@ -0,0 +1,11 @@
---
root_ca_slug: aurore-root-ca
root_ca_common_name: Aurore Test Root CA
root_ca_country_name: FR
root_ca_locality_name: Gif-sur-Yvette
root_ca_state_name: Essonne
root_ca_organization_name: Aurore
root_ca_email: pki@aurore.local
root_ca_not_before: +0s
root_ca_not_after: +3650d
...

8
hosts Normal file

@ -0,0 +1,8 @@
collector.aurore.local
elastic.aurore.local
[remote_collected_log]
backup.aurore.local
elastic.aurore.local
modern-client.aurore.local
pki.aurore.local

0
loot/certs/.gitkeep Normal file


@ -0,0 +1,19 @@
---
- name: Install GnuPG (to manage the APT keystore)
become: yes
apt:
name: gnupg2
state: present
- name: Install common CA certificates
become: yes
apt:
name: ca-certificates
state: present
- name: Ensure that APT can use HTTPS repositories
become: yes
apt:
name: apt-transport-https
state: present
...


@ -0,0 +1,5 @@
---
- name: Rebuild locales database
become: yes
command: dpkg-reconfigure locales -f noninteractive
...


@ -0,0 +1,28 @@
---
- name: Install various useful tools
become: yes
apt:
name:
- htop
- locales
- vim
- lsof
- nmap
- socat
state: present
- name: Setup the default locale in locale.gen
become: yes
locale_gen:
name: "{{ default_locale }}"
state: present
- name: Setup the default locale in debconf
become: yes
debconf:
name: locales
question: locales/default_environment_locale
value: "{{ default_locale }}"
vtype: select
notify: Rebuild locales database
...


@ -0,0 +1,4 @@
---
dependencies:
- role: apt-common
...


@ -0,0 +1,20 @@
---
- name: Make APT trust Elastic GPG key
become: yes
apt_key:
url: https://artifacts.elastic.co/GPG-KEY-elasticsearch
state: present
- name: Install Elastic non-OSS repository
become: yes
apt_repository:
repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main
state: present
filename: elastic-7
- name: Install default JRE
become: yes
apt:
name: default-jre-headless
state: present
...


@ -0,0 +1,7 @@
---
- name: Restart elasticsearch
become: yes
systemd:
name: elasticsearch.service
state: restarted
...


@ -0,0 +1,4 @@
---
dependencies:
- role: elastic-common
...


@ -0,0 +1,31 @@
---
- name: Install elasticsearch
become: yes
apt:
name: elasticsearch
state: present
- name: Deploy elasticsearch configuration
become: yes
template:
src: "{{ item }}.j2"
dest: "/etc/elasticsearch/{{ item }}"
owner: elasticsearch
group: elasticsearch
mode: u=r,g=,o=
# FIXME: I don't think we need to restart
# Elasticsearch for roles.yml, users_roles and users
loop:
- elasticsearch.yml
- roles.yml
- users_roles
- users
notify: Restart elasticsearch
- name: Enable elasticsearch service
become: yes
systemd:
name: elasticsearch.service
state: started
enabled: yes
...


@ -0,0 +1,21 @@
---
{{ ansible_managed | comment }}
discovery.type: single-node
network.host: "{{ elasticsearch_address }}"
http.port: "{{ elasticsearch_port }}"
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
xpack.security.enabled: true
xpack.security.audit.enabled: true
# FIXME: we force-enable this for API keys (it will not be
# enough if we run in production mode)
xpack.security.authc.api_key.enabled: true
# Disable unneeded features.
# TODO: complete this list.
xpack.ml.enabled: false
...


@ -0,0 +1,14 @@
---
{{ ansible_managed | comment }}
logstash_ingest:
  indices:
    - names:
        - "logstash-*"
      privileges:
        - write
        - create
        - delete
        - create_index
        - manage
        - manage_ilm
...


@ -0,0 +1,23 @@
{#
FIXME: The salt is chosen randomly on every Ansible run, so we
lose idempotence, which is not pleasant. We could check that the
password hash is the right one, but I don't think that is
trivially possible with Ansible. Otherwise we could store the
salt in our configuration.
Note: Ansible has an assert module that might let us do what
we want here.
FIXME: bcrypt "2a" and "2b" are normally more or less identical.
It would still be simpler to be able to pass the "ident" argument
to password_hash()
(see https://github.com/ansible/ansible/pull/21215/files).
#}
{% for name, user in elasticsearch_users.items() %}
{{ name }}:{{
user.password
| password_hash("bcrypt")
| regex_replace("^\$2b\$(.+)$", "$2a$\\1")
}}
{% endfor %}
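
One way to restore idempotence, as the comment above suggests, is to store the salt in the configuration. A minimal sketch, assuming a hypothetical per-user salt key is added to elasticsearch_users (passlib is required on the controller, and each salt must be 22 characters from bcrypt's ./A-Za-z0-9 alphabet):

{# Sketch only: user.salt is a hypothetical extra key, not part of this commit #}
{% for name, user in elasticsearch_users.items() %}
{{ name }}:{{
user.password
| password_hash("bcrypt", user.salt)
| regex_replace("^\$2b\$(.+)$", "$2a$\\1")
}}
{% endfor %}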


@ -0,0 +1,13 @@
{%
for role in elasticsearch_users.values()
| map(attribute="roles")
| flatten
| unique
%}
{{ role }}:{{
elasticsearch_users.items()
| selectattr("1.roles", "contains", role)
| map(attribute=0)
| join(",")
}}
{% endfor %}
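
With the users defined in the group variables above (logstash_internal holding the logstash_ingest role and jeltz holding superuser), this template should render roughly as:

logstash_ingest:logstash_internal
superuser:jeltz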


@ -0,0 +1,7 @@
---
- name: Restart kibana
become: yes
systemd:
name: kibana.service
state: restarted
...


@ -0,0 +1,4 @@
---
dependencies:
- role: elastic-common
...


@ -0,0 +1,24 @@
---
- name: Install kibana
become: yes
apt:
name: kibana
state: present
- name: Deploy kibana configuration file
become: yes
template:
src: kibana.yml.j2
dest: /etc/kibana/kibana.yml
owner: kibana
group: kibana
mode: u=r,g=,o=
notify: Restart kibana
- name: Enable kibana service
become: yes
systemd:
name: kibana.service
state: started
enabled: yes
...


@ -0,0 +1,33 @@
---
{{ ansible_managed | comment }}
server.host: "{{ kibana_address }}"
server.port: "{{ kibana_port }}"
# TODO: decide whether it is better to expose Kibana directly
# or to put an NGINX reverse proxy in front of it
#server.ssl.enabled: true
#server.ssl.certificate
#server.ssl.key
elasticsearch.hosts:
- "{{ kibana_es_host }}"
elasticsearch.username: "{{ kibana_es_username }}"
elasticsearch.password: "{{ kibana_es_password }}"
# FIXME: maybe we should not use the same key for both?
xpack.security.encryptionKey: "{{ kibana_encryption_key }}"
xpack.encryptedSavedObjects.encryptionKey: "{{ kibana_encryption_key }}"
# Disable some unneeded features.
# TODO: make sure they really are unneeded.
xpack.license_management.enabled: false
telemetry.enabled: false
telemetry.allowChangingOptInStatus: false
newsfeed.enabled: false
xpack.apm.enabled: false
xpack.ml.enabled: false
xpack.siem.enabled: false
xpack.uptime.enabled: false
xpack.monitoring.enabled: false
...


@ -0,0 +1 @@
BASE64 [a-zA-Z0-9\+/]+


@ -0,0 +1,2 @@
PAM_CHOICE (auth|setcred|account|session|chauthtoken)
PAM_PREFIX \(%{DATA:pam_service_name}:%{PAM_CHOICE:pam_choice}\)


@ -0,0 +1,39 @@
{{ ansible_managed | comment }}
# Source: https://github.com/reallyenglish/grok-patterns-sshd
# Modified to add handling of key fingerprints
SSHD_MESSAGE_INVALID_USER Invalid user
SSHD_MESSAGE_INVALID_USER2 input_userauth_request: invalid user
SSHD_MESSAGE_ACCEPTED Accepted
SSHD_MESSAGE_BAD_PROTOCOL Bad protocol version identification
SSHD_MESSAGE_CONNECTION_CLOSED Connection closed by
SSHD_MESSAGE_DID_NOT_RECEIVE_ID Did not receive identification string from
SSHD_MESSAGE_TOO_MANY_AUTHENTICATION_FAILURES Too many authentication failures
SSHD_MESSAGE_ERROR_CONNECT error: connect_to
SSHD_MESSAGE_FATAL_READ_FROM_SOCKET_FAILED fatal: Read from socket failed
SSHD_MESSAGE_FATAL_TIMEOUT_BEFORE_AUTHENTICATION fatal: Timeout before authentication
SSHD_MESSAGE_RECEIVED_DISCONNECT Received disconnect
SSHD_MESSAGE_TIMEOUT Timeout
SSHD_ERROR_INVALID_LOGIN %{SSHD_MESSAGE_INVALID_USER:sshd_message} %{NOTSPACE:sshd_invalid_login_user} from %{IP:sshd_client_ip}
SSHD_ERROR_INVALID_LOGIN2 %{SSHD_MESSAGE_INVALID_USER2:sshd_message} %{NOTSPACE:sshd_invalid_login_user} \[preauth\]
SSHD_ERROR_BAD_PROTOCOL %{SSHD_MESSAGE_BAD_PROTOCOL:sshd_message} '%{GREEDYDATA:sshd_error_bad_protocol_name}' from %{IP:sshd_client_ip}
SSHD_ERROR_TOO_MANY_AUTHENTICATION_FAILURES Disconnecting: %{SSHD_MESSAGE_TOO_MANY_AUTHENTICATION_FAILURES:sshd_message} for %{WORD:sshd_too_many_authentication_failures_user} \[preauth\]
SSHD_ERROR_CONNECT_TO %{SSHD_MESSAGE_ERROR_CONNECT:sshd_message} %{GREEDYDATA:sshd_reason}
SSHD_ERROR_FATAL_READ_FROM_SOCKET_FAILED %{SSHD_MESSAGE_FATAL_READ_FROM_SOCKET_FAILED:sshd_message}: %{GREEDYDATA:sshd_reason}
SSHD_FATAL_TIMEOUT_BEFORE_AUTHENTICATION %{SSHD_MESSAGE_FATAL_TIMEOUT_BEFORE_AUTHENTICATION:sshd_message} for %{IP:sshd_client_ip}
SSHD_ERROR %{SSHD_ERROR_BAD_PROTOCOL}|%{SSHD_ERROR_INVALID_LOGIN}|%{SSHD_ERROR_TOO_MANY_AUTHENTICATION_FAILURES}|%{SSHD_ERROR_CONNECT_TO}|%{SSHD_ERROR_FATAL_READ_FROM_SOCKET_FAILED}|%{SSHD_FATAL_TIMEOUT_BEFORE_AUTHENTICATION}|%{SSHD_ERROR_INVALID_LOGIN2}
SSHD_KEY_FINGERPRINT %{WORD:sshd_key_type} %{WORD:sshd_key_hash_algorithm}:%{BASE64:sshd_key_hash}
SSHD_INFO_LOGIN %{SSHD_MESSAGE_ACCEPTED:sshd_message} %{WORD:sshd_login_auth_method} for %{WORD:sshd_login_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_login_port} %{WORD:sshd_login_proto}(: %{SSHD_KEY_FINGERPRINT})?
SSHD_INFO_CONNECTION_CLOSED %{SSHD_MESSAGE_CONNECTION_CLOSED:sshd_message} %{IP:sshd_client_ip} \[preauth\]
SSHD_INFO_DID_NOT_RECEIVE_ID %{SSHD_MESSAGE_DID_NOT_RECEIVE_ID:sshd_message} %{IP:sshd_client_ip}
SSHD_INFO_RECEIVED_DISCONNECT %{SSHD_MESSAGE_RECEIVED_DISCONNECT:sshd_message} from %{IP:sshd_client_ip}: %{INT}: %{DATA:sshd_reason}(?: \[preauth\])?
SSHD_INFO_TIMEOUT %{SSHD_MESSAGE_TIMEOUT:sshd_message}, %{GREEDYDATA:sshd_reason}
SSHD_INFO %{SSHD_INFO_CONNECTION_CLOSED}|%{SSHD_INFO_DID_NOT_RECEIVE_ID}|%{SSHD_INFO_LOGIN}|%{SSHD_INFO_RECEIVED_DISCONNECT}|%{SSHD_INFO_TIMEOUT}
SSHD %{SSHD_INFO}|%{SSHD_ERROR}


@ -0,0 +1,7 @@
---
- name: Restart logstash
become: yes
systemd:
name: logstash.service
state: restarted
...


@ -0,0 +1,4 @@
---
dependencies:
- role: elastic-common
...


@ -0,0 +1,48 @@
---
- name: Install logstash
become: yes
apt:
name: logstash
state: present
- name: Create Grok patterns directory
become: yes
file:
path: /etc/logstash/patterns
state: directory
owner: root
group: root
mode: u=rwx,g=rx,o=rx
- name: Deploy Grok patterns
become: yes
copy:
src: "{{ item }}"
dest: /etc/logstash/patterns/
owner: logstash
group: logstash
mode: u=r,g=,o=
with_fileglob:
- patterns/*
- name: Deploy logstash configuration
become: yes
template:
src: "{{ item }}.j2"
dest: "/etc/logstash/conf.d/{{ item }}"
owner: logstash
group: logstash
mode: u=r,g=,o=
with_items:
- main.conf
- sshd.conf
- pam_unix.conf
notify: Restart logstash
- name: Enable logstash service
become: yes
systemd:
name: logstash.service
state: started
enabled: yes
...


@ -0,0 +1,19 @@
{{ ansible_managed | comment }}
input {
redis {
path => "{{ logstash_redis_path }}"
password => "{{ logstash_redis_password }}"
data_type => "channel"
key => "{{ logstash_redis_key }}"
codec => "json"
}
}
output {
elasticsearch {
hosts => ["{{ logstash_es_host }}"]
user => "{{ logstash_es_username }}"
password => "{{ logstash_es_password }}"
}
}


@ -0,0 +1,33 @@
{{ ansible_managed | comment }}
filter {
if [facility] == "authpriv" {
grok {
patterns_dir => ["/etc/logstash/patterns"]
patterns_files_glob => "*.grok"
match => {
"message" => "^pam_unix%{PAM_PREFIX}: authentication failure; logname=(%{USERNAME:pam_logname})? uid=%{INT:pam_uid} euid=%{INT:pam_euid} tty=%{TTY:pam_tty} ruser=(%{USERNAME:pam_ruser})? rhost=(%{HOSTNAME:pam_rhost})? user=%{USERNAME:pam_user}$"
}
add_tag => ["pam_unix", "pam_unix_auth_fail"]
tag_on_failure => []
}
grok {
patterns_dir => ["/etc/logstash/patterns"]
patterns_files_glob => "*.grok"
match => {
"message" => "^pam_unix%{PAM_PREFIX}: session opened for user (%{USERNAME:pam_user})?(\(uid=%{INT:pam_uid}\))? by (%{USERNAME:pam_by_user})?(\(uid=%{INT:pam_by_uid}\))?$"
}
add_tag => ["pam_unix", "pam_unix_session_opened"]
tag_on_failure => []
}
grok {
patterns_dir => ["/etc/logstash/patterns"]
patterns_files_glob => "*.grok"
match => {
"message" => "^pam_unix%{PAM_PREFIX}: session closed for user %{USERNAME:pam_user}$"
}
add_tag => ["pam_unix", "pam_unix_session_closed"]
tag_on_failure => []
}
}
}
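
As an illustration, a pam_unix authentication failure message such as the following (hypothetical values, assuming the stock TTY and USERNAME grok patterns) should match the first block above:

pam_unix(su:auth): authentication failure; logname=jeltz uid=1000 euid=0 tty=/dev/pts/0 ruser=jeltz rhost= user=root

and yield roughly pam_service_name=su, pam_choice=auth, pam_logname=jeltz, pam_uid=1000, pam_euid=0, pam_tty=/dev/pts/0, pam_ruser=jeltz and pam_user=root, plus the tags pam_unix and pam_unix_auth_fail.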


@ -0,0 +1,43 @@
{{ ansible_managed | comment }}
filter {
if [program] == "sshd" and [facility] == "auth" {
grok {
patterns_dir => ["/etc/logstash/patterns"]
patterns_files_glob => "*.grok"
match => {
"message" => "^Accepted %{WORD:sshd_auth_method} for %{USERNAME:sshd_auth_user} from %{IP:sshd_client_ip} port %{INT:sshd_client_port} %{WORD:sshd_proto_version}(: %{WORD:sshd_key_type} %{WORD:sshd_key_hash_type}:%{BASE64:sshd_key_hash})?$"
}
add_tag => ["sshd", "sshd_accepted_auth"]
tag_on_failure => []
}
grok {
match => {
"message" => "^Received disconnect from %{IP:sshd_client_ip} port %{INT:sshd_client_port}:%{INT:sshd_disconnect_reason}: %{GREEDYDATA:sshd_disconnect_msg}$"
}
add_tag => ["sshd", "sshd_received_disconnect"]
tag_on_failure => []
}
grok {
match => {
"message" => "^Disconnected from user %{USERNAME:sshd_auth_user} %{IP:sshd_client_ip} port %{INT:sshd_client_port}$"
}
add_tag => ["sshd", "sshd_disconnected"]
tag_on_failure => []
}
grok {
match => {
"message" => "^Server listening on %{IP:sshd_listen_ip} port %{INT:sshd_listen_port}.$"
}
add_tag => ["sshd", "sshd_listening"]
tag_on_failure => []
}
grok {
match => {
"message" => "^Received signal %{INT:sshd_signal}; %{GREEDYDATA:sshd_signal_action}.$"
}
add_tag => ["sshd", "sshd_received_signal"]
tag_on_failure => []
}
}
}


@ -0,0 +1,21 @@
{{ ansible_managed | comment }}
filter {
if [program] == "sudo" and [facility] == "authpriv" {
grok {
# FIXME: this is not safe at all: the executed command can contain
# "COMMAND=" in its arguments, and the PWD field can also contain
# " ; COMMAND=".
# For example, by cd-ing into nested directories named
# "Documents ; USER=jeltz ; COMMAND=/bin/ls", we get:
# jeltz : TTY=pts/0 ; PWD=/home/jeltz/Documents ; COMMAND=/bin/ls ; USER=root ; COMMAND=/usr/bin/id
# The only way I can see to handle this is to raise a Kibana alert
# whenever sudo_command or sudo_pwd contains "; COMMAND=" or " ; USER=".
match => {
"message" => "^ *%{USERNAME:sudo_user} : TTY=%{DATA:sudo_tty} ; PWD=%{DATA:sudo_pwd} ; USER=%{USERNAME:sudo_elevated_user} ; COMMAND=%{DATA:sudo_command}$"
}
add_tag => ["sudo", "sudo_command"]
tag_on_failure => []
}
}
}
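
A possible follow-up to the alerting idea from the comment above, sketched as an extra Logstash conditional (the tag name is made up here and not part of this commit):

if [sudo_command] =~ /; (COMMAND|USER)=/ or [sudo_pwd] =~ /; (COMMAND|USER)=/ {
# Flag entries that look like field injection so Kibana can alert on them
mutate { add_tag => ["sudo_possible_field_injection"] }
}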


@ -0,0 +1,4 @@
---
dependencies:
- role: openssl-common
...


@ -0,0 +1,73 @@
---
- name: Create directories for storing certificates
become: true
file:
path: "/etc/{{ item.name }}"
state: directory
owner: root
group: root
mode: "{{ item.mode }}"
with_items:
- name: ssl
mode: u=rwx,g=rx,o=rx
- name: ssl/private
mode: u=rwx,g=,o=
- name: ssl/csr
mode: u=rwx,g=,o=
- name: ssl/certs
mode: u=rwx,g=rx,o=rx
- name: Generate a private key for the root CA
become: true
openssl_privatekey:
path: "/etc/ssl/private/{{ root_ca_slug }}.pem"
owner: root
group: root
mode: u=r,g=,o=
type: ECC
curve: secp384r1
- name: Generate a CSR for the root CA private key
become: true
openssl_csr:
privatekey_path: "/etc/ssl/private/{{ root_ca_slug }}.pem"
path: "/etc/ssl/csr/{{ root_ca_slug }}.csr"
owner: root
group: root
mode: u=r,g=,o=
common_name: "{{ root_ca_common_name }}"
country_name: "{{ root_ca_country_name }}"
locality_name: "{{ root_ca_locality_name }}"
organization_name: "{{ root_ca_organization_name }}"
state_or_province_name: "{{ root_ca_state_name }}"
email_address: "{{ root_ca_email }}"
use_common_name_for_san: false
basic_constraints_critical: true
basic_constraints:
- CA:TRUE
- pathlen:1
key_usage:
- keyCertSign
- cRLSign
key_usage_critical: true
# FIXME: regenerate the certificate when it is no longer valid
- name: Generate the root CA certificate
become: true
openssl_certificate:
privatekey_path: "/etc/ssl/private/{{ root_ca_slug }}.pem"
csr_path: "/etc/ssl/csr/{{ root_ca_slug }}.csr"
path: "/etc/ssl/certs/{{ root_ca_slug }}.pem"
owner: root
group: root
mode: u=r,g=r,o=r
provider: selfsigned
selfsigned_not_before: "{{ root_ca_not_before }}"
selfsigned_not_after: "{{ root_ca_not_after }}"
- name: Retrieve the root CA certificate
fetch:
src: "/etc/ssl/certs/{{ root_ca_slug }}.pem"
dest: "loot/certs/{{ root_ca_slug }}.pem"
flat: yes
...
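
After a run, the fetched certificate can be inspected on the control machine, for example with (assuming the pki group variables above, so root_ca_slug is aurore-root-ca):

openssl x509 -in loot/certs/aurore-root-ca.pem -noout -text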


@ -0,0 +1,7 @@
---
- name: Install python3-cryptography library
become: true
apt:
name: python3-cryptography
state: present
...


@ -0,0 +1,7 @@
---
- name: Restart redis-server
become: yes
systemd:
name: redis-server.service
state: restarted
...


@ -0,0 +1,24 @@
---
- name: Install redis server
become: yes
apt:
name: redis-server
state: present
- name: Install redis configuration file
become: yes
template:
src: redis.conf.j2
dest: /etc/redis/redis.conf
owner: redis
group: redis
mode: u=r,g=,o=
notify: Restart redis-server
- name: Enable redis-server service
become: yes
systemd:
name: redis-server.service
state: started
enabled: yes
...


@ -0,0 +1,86 @@
{{ ansible_managed | comment }}
bind {{ redis_address }}
port {{ redis_port }}
# port 0
# tls-port {{ redis_port }}
# A password is configured
protected-mode no
{% if redis_unix_socket is defined %}
unixsocket {{ redis_unix_socket }}
# FIXME: too open? -> require the password?
unixsocketperm 666
{% endif %}
{% if redis_password is defined %}
requirepass {{ redis_password }}
{% endif %}
# TODO: go through the default configuration to see
# what we should keep
syslog-enabled yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis/redis-server.pid
# FIXME: maybe warning instead?
loglevel notice
logfile /var/log/redis/redis-server.log
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /var/lib/redis
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes


@ -0,0 +1,4 @@
---
dependencies:
- role: rsyslog-common
...


@ -0,0 +1,18 @@
---
- name: Install rsyslog-relp if needed
become: yes
apt:
name: rsyslog-relp
state: present
when: "rsyslog_inputs | selectattr('proto', 'eq', 'relp') | list"
- name: Deploy rsyslog input configuration file
become: yes
template:
src: 90-collect.conf.j2
dest: /etc/rsyslog.d/90-collect.conf
owner: root
group: root
mode: u=rw,g=r,o=r
notify: Restart rsyslog
...


@ -0,0 +1,75 @@
{{ ansible_managed | comment }}
module(load="mmrm1stspace")
{%
set input_modules = {
"relp": "imrelp",
"udp": "imudp",
}
%}
{% if rsyslog_inputs is defined %}
# Load the log collection (input) modules
{%
for module in rsyslog_inputs
| map(attribute="proto")
| map("extract", input_modules)
| list
| unique
%}
module(load="{{ module }}")
{% endfor %}
{% endif %}
{% if rsyslog_collector_base_dir is defined %}
# Template used to name logs collected from remote hosts
template(name="incomingFilename" type="list") {
constant(value="{{ rsyslog_collector_base_dir }}/")
property(name="fromhost-ip")
constant(value="/")
property(name="timegenerated" dateFormat="year")
constant(value="-")
property(name="timegenerated" dateFormat="month")
constant(value="-")
property(name="timegenerated" dateFormat="day")
constant(value=".log")
}
{% endif %}
{% if rsyslog_inputs %}
ruleset(name="handleIncomingLogs") {
action(type="mmrm1stspace")
{% if rsyslog_collector_base_dir is defined %}
action(
type="omfile"
dynaFile="incomingFilename"
template="RSYSLOG_FileFormat"
)
{% endif %}
call sendLogsToRemote
}
{% endif %}
{% for input in rsyslog_inputs %}
# TODO: add per-protocol options (e.g. TLS)?
input(
type="{{ input_modules[input.proto] }}"
address="{{ input.address }}"
port="{{ input.port }}"
ruleset="handleIncomingLogs"
)
{% endfor %}
{#
# 4. Handling of "file" logs to be sent to the collector (30-{{app_name}}.conf)
-> {{app_name}}
input(
type="imfile"
ruleset="sendLogsRemote"
)
#}
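
Rendered with the collector group variables shown earlier (a RELP input on 0.0.0.0:2514 and /var/log/remote as base directory), this template should, roughly, load imrelp, declare the incomingFilename template, and produce:

ruleset(name="handleIncomingLogs") {
action(type="mmrm1stspace")
action(
type="omfile"
dynaFile="incomingFilename"
template="RSYSLOG_FileFormat"
)
call sendLogsToRemote
}
input(
type="imrelp"
address="0.0.0.0"
port="2514"
ruleset="handleIncomingLogs"
)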


@ -0,0 +1,13 @@
---
- name: Restart rsyslog
become: yes
systemd:
name: rsyslog.service
state: restarted
- name: Restart systemd-journald
become: yes
systemd:
name: systemd-journald.service
state: restarted
...


@ -0,0 +1,57 @@
---
- name: Install rsyslog
become: yes
apt:
name: rsyslog
state: present
- name: Install rsyslog modules if needed
become: yes
apt:
name: " {{ item.pkg }}"
state: present
when: "rsyslog_outputs | selectattr('proto', 'eq', item.proto) | list"
loop:
- proto: relp
pkg: rsyslog-relp
- proto: redis
pkg: rsyslog-hiredis
- name: Deploy main rsyslog configuration
become: yes
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
mode: u=rw,g=r,o=r
notify: Restart rsyslog
loop:
- src: rsyslog.conf.j2
dest: /etc/rsyslog.conf
- src: 10-common.conf.j2
dest: /etc/rsyslog.d/10-common.conf
- name: Create journald.conf.d directory
become: yes
file:
path: /etc/systemd/journald.conf.d
state: directory
- name: Deploy journald configuration
become: yes
template:
src: forward-syslog.conf.j2
dest: /etc/systemd/journald.conf.d/forward-syslog.conf
owner: root
group: root
mode: u=rw,g=r,o=r
notify: Restart systemd-journald
- name: Enable rsyslog service
become: yes
systemd:
name: rsyslog.service
state: started
enabled: yes
...


@ -0,0 +1,116 @@
{{ ansible_managed | comment }}
{%
set output_modules = {
"relp": "omrelp",
"zeromq": "omczmq",
"udp": "omfwd",
"redis": "omhiredis",
}
%}
global(
workDirectory="/var/spool/rsyslog"
preserveFQDN="on"
)
# Collect logs from /dev/log
module(load="imuxsock")
# Collect kernel logs
module(load="imklog")
# Collect logs from systemd-journald
module(load="imjournal")
# FIXME: is this useful with systemd-journald?
# Handle CEE-structured logs
module(load="mmjsonparse")
{% if rsyslog_outputs is defined %}
# Load the log forwarding (output) modules
{%
for module in rsyslog_outputs
| map(attribute="proto")
| map("extract", output_modules)
| list
| unique
%}
module(load="{{ module }}")
{% endfor %}
{% endif %}
# FIXME: careful, we need to check whether rsyslog can actually create
# the files for subsequent days (the first one may be created before
# privileges are dropped, but probably not the following ones).
module(
load="builtin:omfile"
# Format with precise timestamps
template="RSYSLOG_FileFormat"
fileOwner="root"
fileGroup="adm"
fileCreateMode="0640"
dirCreateMode="0755"
)
template(name="templateJson" type="list" option.jsonf="on") {
property(outname="hostname_reported" name="hostname" format="jsonf")
property(outname="src" name="fromhost-ip" format="jsonf")
property(outname="facility" name="syslogfacility-text" format="jsonf")
property(outname="program" name="programname" format="jsonf")
property(outname="pid" name="procid" format="jsonf")
property(outname="time_reported" name="timereported" format="jsonf" dateformat="rfc3339")
property(outname="time_generated" name="timegenerated" format="jsonf" dateformat="rfc3339")
property(outname="message" name="msg" format="jsonf")
}
ruleset(name="sendLogsToDisk") {
auth,authpriv.* action(type="omfile" file="/var/log/auth.log")
mail.* action(type="omfile" file="/var/log/mail.log" sync="off")
kern.* action(type="omfile" file="/var/log/kern.log")
*.*;auth,authpriv.none action(type="omfile" file="/var/log/syslog.log" sync="off")
# FIXME: more rules could be added to match the stock Debian
# configuration more closely. It is redundant, but if that is
# wanted, there is no problem with it.
}
# Ruleset for sending logs to remote hosts
ruleset(name="sendLogsToRemote") {
{% if rsyslog_outputs is defined %}
{% for output in rsyslog_outputs %}
action(
type="{{ output_modules[output.proto] }}"
{% if output_modules[output.proto] == "omfwd" %}
protocol="{{ output.proto }}"
target="{{ output.address }}"
port="{{ output.port }}"
{% elif output_modules[output.proto] == "omhiredis" %}
server="{{ output.address }}"
serverport="{{ output.port }}"
mode="publish"
key="{{ output.key }}"
template="templateJson"
{% if output.password is defined %}
serverpassword="{{ output.password }}"
{% endif %}
{% elif output_modules[output.proto] == "omrelp" %}
target="{{ output.address }}"
port="{{ output.port }}"
{% endif %}
{% if loop.index > 1 and output.fallback %}
action.execOnlyWhenPreviousIsSuspended="on"
{% endif %}
)
{% endfor %}
{% endif %}
}
# Send local logs to files to help with debugging
# without having to go through the centralization system
call sendLogsToDisk
# Send all local logs to the remote hosts
call sendLogsToRemote
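
For the collector host, whose rsyslog_outputs (seen in the group variables above) contain a single Redis entry, the sendLogsToRemote ruleset defined in this template should render roughly as:

ruleset(name="sendLogsToRemote") {
action(
type="omhiredis"
server="10.42.0.35"
serverport="6379"
mode="publish"
key="logstash"
template="templateJson"
serverpassword="P@ssw0rd!"
)
}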


@ -0,0 +1,7 @@
{{ ansible_managed | comment }}
[Journal]
# FIXME: setting this to yes duplicates the logs, probably
# because imjournal already handles collection
ForwardToSyslog=no
MaxLevelSyslog=debug


@ -0,0 +1,3 @@
{{ ansible_managed | comment }}
include(file="/etc/rsyslog.d/*.conf")